| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, lengths 82 to 53.2k | int64, 0 to 721 | string, lengths 91 to 41.9k | int64, 0 to 699 | int64, 0 to 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature to read image data from an image file.

    Stored internally as an Arrow struct of {"bytes": binary, "path": string}."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format suitable for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an encoded example back into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced image files as bytes into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes using its native compression if possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for Arrow."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
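

# Hedged usage sketch of the Image feature above (requires Pillow; the array
# input is synthetic). Kept as a comment since this module uses relative imports
# and is not meant to be run directly:
#
#   feature = Image()
#   encoded = feature.encode_example(np.zeros((4, 4), dtype=np.uint8))
#   # -> {"path": None, "bytes": b"..."} with the array compressed as PNG
#   pil_image = feature.decode_example(encoded)  # -> PIL.Image.Image of size (4, 4)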
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
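

# Hypothetical illustration of how the mixin above is meant to be used: mix it
# into a unittest.TestCase subclass and set `self.tool` in setUp(). The
# `load_tool` helper and the "text-classification" task name are assumptions,
# not verified against a particular transformers version:
#
#   import unittest
#   from transformers import load_tool
#
#   class TextClassificationToolTest(unittest.TestCase, ToolTesterMixin):
#       def setUp(self):
#           self.tool = load_tool("text-classification")
#           self.tool.setup()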
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the number of
    reduced proper fractions with denominator at most `limit`, computed with a
    sieve: start from phi[i] = i - 1 (true for primes) and correct multiples."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
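    # Sanity check from the Project Euler 72 statement: there are exactly 21
    # reduced proper fractions with denominator d <= 8.
    assert solution(8) == 21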
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to read raw microphone data through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Helper function to read audio from the microphone in overlapping chunks."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and cuts them into chunks of length `chunk_len`, optionally
    overlapping by `stride`. `stream` makes it yield partial results before a full chunk is available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function that yields successive reads from an ffmpeg subprocess."""
    bufsize = 2**24  # 16MB
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
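

# Illustrative programmatic invocation; in practice this script is launched via
# `accelerate launch` with a DeepSpeed config. The Namespace fields below simply
# mirror the CLI flags defined in main():
#
#   from argparse import Namespace
#
#   args = Namespace(model_name_or_path="bert-base-cased", output_dir=".",
#                    performance_lower_bound=None, num_epochs=1)
#   config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
#   training_function(config, args)   # requires a working accelerate setup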
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
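

# Minimal usage sketch: a default configuration and the derived down_ops field.
#
#   config = LevitConfig()
#   config.hidden_sizes   # [128, 256, 384]
#   config.down_ops[0]    # ['Subsample', 16, 8, 4, 2, 2]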
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
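

# Hedged usage sketch: encode a sentence pair with the fast tokenizer (vocab
# files are fetched from the Hub on first use; the exact tokens are illustrative):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   encoding = tokenizer("Hello world!", "Second sentence.")
#   tokenizer.convert_ids_to_tokens(encoding["input_ids"])[:3]   # e.g. ['<s>', 'Hello', ...]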
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count the perimeters L <= limit that belong to exactly
    one integer-sided right triangle. Primitive Pythagorean triples are generated
    from Euclid's formula with coprime m > n of opposite parity; every multiple
    of a primitive perimeter is also a valid perimeter."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the most significant set bit of `number`
    (i.e. its binary length), or 0 for an input of 0.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1  # drop the lowest bit until nothing remains
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
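    # For non-negative ints this matches Python's built-in int.bit_length():
    assert get_highest_set_bit_position(25) == (25).bit_length() == 5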
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
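

# Hedged usage sketch: align the template with a dataset's actual label feature
# (the class names are illustrative):
#
#   from datasets import Audio, ClassLabel, Features
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = AudioClassification(audio_column="audio", label_column="labels")
#   aligned = template.align_with_features(features)
#   aligned.label_schema["labels"].names   # ['cat', 'dog']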
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # NOTE: in the upstream source these are distinct Unicode space-like
        # characters; text extraction may have flattened some to plain spaces.
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
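

# Hedged usage sketch (model id taken from the map above; requires the
# sentencepiece package and a network connection on first use):
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Svenska är kul!")
#   tokenizer.decode_fast(ids)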
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # no fast tokenizer to register for RoCBert

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer to import for RoCBert

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
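

# Illustrative consequence of the _LazyModule pattern above: submodule imports
# are deferred until first attribute access (the module path is assumed):
#
#   from transformers.models import roc_bert
#   cfg_cls = roc_bert.RoCBertConfig  # configuration_roc_bert is imported here,
#                                     # not when the package itself was imported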
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
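# A minimal sketch of how the cartesian product of --variations described above
# is built (it mirrors the logic in main() further down; values are illustrative):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', ...]
#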
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowerCAmelCase = float('nan')
class SCREAMING_SNAKE_CASE :
def __init__( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
a_ : int = sys.stdout
a_ : Any = open(__SCREAMING_SNAKE_CASE , '''a''' )
def __getattr__( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
return getattr(self.stdout , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
self.stdout.write(__SCREAMING_SNAKE_CASE )
# strip tqdm codes
self.file.write(re.sub(r'''^.*\r''' , '''''' , __SCREAMING_SNAKE_CASE , 0 , re.M ) )
def _UpperCAmelCase ( __A : List[str]=80 , __A : str=False ):
a_ : Optional[int] = []
# deal with critical env vars
a_ : Dict = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
a_ : Tuple = os.environ.get(__A , __A )
if val is not None:
cmd.append(f'{key}={val}' )
# python executable (not always needed if the script is executable)
a_ : Dict = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(__A )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
a_ : Union[str, Any] = []
a_ : int = ''''''
while len(__A ) > 0:
current_line += f'{cmd.pop(0 )} '
if len(__A ) == 0 or len(__A ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(__A )
a_ : Optional[Any] = ''''''
return "\\\n".join(__A )
def _UpperCAmelCase ( __A : Tuple , __A : List[Any] ):
# unwrap multi-line input
a_ : int = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
a_ : str = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += f' --output_dir {output_dir}'
# ensure we have --overwrite_output_dir
a_ : int = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase ( __A : Dict , __A : List[Any] , __A : Optional[int] , __A : List[str] , __A : Dict , __A : List[Any] , __A : List[str] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
a_ : List[str] = subprocess.run(__A , capture_output=__A , text=__A )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
a_ : Union[str, Any] = variation.replace(''' ''' , '''-''' )
with open(Path(__A ) / f'log.{prefix}.stdout.txt' , '''w''' ) as f:
f.write(result.stdout )
with open(Path(__A ) / f'log.{prefix}.stderr.txt' , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(f'{output_dir}/all_results.json' , '''r''' , encoding='''utf-8''' ) as f:
a_ : Union[str, Any] = json.load(__A )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase ( __A : Tuple , __A : List[str] , __A : Dict , __A : Dict , __A : int , __A : str , __A : Dict , __A : Any , __A : int , __A : str , ):
a_ : Optional[int] = []
a_ : Dict = []
a_ : Tuple = f'{id}: {variation:<{longest_variation_len}}'
a_ : Dict = f'{preamble}: '
a_ : List[Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(__A ) , desc=__A , leave=__A ):
a_ : Optional[int] = process_run_single(
__A , __A , __A , __A , __A , __A , __A )
a_ : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(__A ):
metrics.append(__A )
results.append(__A )
outcome += "✓"
else:
outcome += "✘"
a_ : List[str] = f'\33[2K\r{outcome}'
if len(__A ) > 0:
a_ : Union[str, Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
a_ : Union[str, Any] = round(mean_metrics[target_metric_key] , 2 )
a_ : Tuple = f'{outcome} {mean_target}'
if len(__A ) > 1:
results_str += f' {tuple(round(__A , 2 ) for x in results )}'
print(__A )
a_ : Optional[int] = variation
return mean_metrics
else:
print(__A )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ( ):
a_ : Dict = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def _UpperCAmelCase ( __A : Optional[int] , __A : Optional[Any] , __A : Dict , __A : List[Any] , __A : int ):
a_ : Dict = pd.DataFrame(__A )
a_ : int = '''variation'''
a_ : List[Any] = '''diff_%'''
a_ : List[str] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
a_ : List[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(__A ):
# as a fallback, use the minimal value as the sentinel
a_ : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(__A ):
a_ : Dict = df.apply(
lambda __A : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
a_ : Tuple = [variation_key, target_metric_key, diff_key, *report_metric_keys]
a_ : Tuple = df.reindex(__A , axis='''columns''' ) # reorder cols
# capitalize
a_ : Tuple = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
a_ : int = df.rename(lambda c : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
a_ : List[str] = df.rename(lambda c : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
a_ : int = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=__A , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=__A , floatfmt='''.2f''' )]
print('''\n\n'''.join(__A ) )
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=__A , type=__A , required=__A , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=__A , type=__A , nargs='''+''' , required=__A , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=__A , type=__A , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=__A , type=__A , required=__A , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=__A , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=__A , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=__A , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=__A , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
a_ : Tuple = parser.parse_args()
a_ : Any = args.output_dir
Path(__A ).mkdir(exist_ok=__A )
a_ : Any = get_base_command(__A , __A )
# split each dimension into its --foo variations
a_ : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
a_ : Tuple = list(map(str.strip , map(''' '''.join , itertools.product(*__A ) ) ) )
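# e.g. for --variations '|--fp16|--bf16' '|--tf32' (the format shown in the argparse
# help above) the dimensions are [['', '--fp16', '--bf16'], ['', '--tf32']] and the
# product yields 6 variations: '', '--tf32', '--fp16', '--fp16 --tf32', '--bf16',
# '--bf16 --tf32'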
a_ : str = max(len(x ) for x in variations )
# split wanted keys
a_ : List[str] = args.report_metric_keys.split()
# capture prints into a log file for convenience
a_ : List[str] = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
print(f'and this script\'s output is also piped into {report_fn}' )
a_ : int = Tee(__A )
print(f'\n*** Running {len(__A )} benchmarks:' )
print(f'Base command: {" ".join(__A )}' )
a_ : Optional[int] = '''variation'''
a_ : List[str] = []
for id, variation in enumerate(tqdm(__A , desc='''Total completion: ''' , leave=__A ) ):
a_ : Optional[int] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , __A , __A , __A , __A , args.target_metric_key , __A , args.repeat_times , __A , args.verbose , ) )
process_results(__A , args.target_metric_key , __A , args.base_variation , __A )
if __name__ == "__main__":
main()
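# A hedged invocation sketch (the script and example paths are illustrative; the
# flags are the ones defined by the argparse setup above):
#
# python benchmark.py \
#     --base-cmd 'python run_translation.py --output_dir output' \
#     --variations '|--fp16|--bf16' '|--tf32' \
#     --target-metric-key train_samples_per_second \
#     --report-metric-keys 'train_loss train_samples' \
#     --repeat-times 3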
| 466
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "openai/whisper-base"
snake_case__ = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
snake_case__ = "transcriber"
snake_case__ = WhisperProcessor
snake_case__ = WhisperForConditionalGeneration
snake_case__ = ["audio"]
snake_case__ = ["text"]
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> str:
return self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
return self.model.generate(inputs=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
return self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )[0]
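# A minimal usage sketch (not part of this file; it assumes PipelineTool instances
# are directly callable with the declared `audio` input, and "sample.wav" is a
# hypothetical path):
#
# tool = SCREAMING_SNAKE_CASE()
# text = tool("sample.wav")  # -> the transcribed text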
| 466
| 1
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__UpperCAmelCase : Optional[Any] = '''facebook/wmt19-en-de'''
__UpperCAmelCase : str = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__UpperCAmelCase : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__UpperCAmelCase : str = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
__UpperCAmelCase : Dict = tokenizer(["Making tiny model"], return_tensors="pt")
__UpperCAmelCase : Dict = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__UpperCAmelCase : Optional[int] = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
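# A hedged sketch of consuming the uploaded checkpoint (the repo id comes from the
# comment near the top of this script):
#
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")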
| 711
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCAmelCase : Tuple = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
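# Usage note (a sketch; the dotted path assumes this file is
# transformers/models/swiftformer/__init__.py): with _LazyModule in place,
#
# from transformers.models.swiftformer import SwiftFormerConfig
#
# resolves the symbol only on first access, and the torch-only model classes are
# simply absent when torch is not installed.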
| 155
| 0
|
lowerCamelCase : int = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def snake_case_ ( lowerCAmelCase_ : bytes ):
# Make sure the supplied data is a bytes-like object
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : List[Any] = F"a bytes-like object is required, not '{data.__class__.__name__}'"
raise TypeError(lowerCAmelCase_ )
__lowercase : int = """""".join(bin(lowerCAmelCase_ )[2:].zfill(8 ) for byte in data )
__lowercase : Optional[int] = len(lowerCAmelCase_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowercase : str = b"""=""" * ((6 - len(lowerCAmelCase_ ) % 6) // 2)
# Append arbitrary binary digits (0's by default) to binary_stream to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase_ ) % 6)
else:
__lowercase : Any = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase_ ) , 6 ) ).encode()
+ padding
)
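# Worked example (arithmetic checked by hand): data = b"a" gives binary_stream
# "01100001" (8 bits); 8 % 6 != 0, so padding = b"=" * ((6 - 8 % 6) // 2) = b"=="
# and the stream is zero-extended to "011000010000". The 6-bit groups 011000 (24)
# and 010000 (16) map to "Y" and "Q", so the result is b"YQ==" -- the same as the
# standard library's base64.b64encode(b"a").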
def snake_case_ ( lowerCAmelCase_ : str ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : str = (
"""argument should be a bytes-like object or ASCII string, """
F"not '{encoded_data.__class__.__name__}'"
)
raise TypeError(lowerCAmelCase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters, then convert it to a str object
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
__lowercase : int = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__lowercase : List[str] = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowercase : Dict = encoded_data[:-padding]
__lowercase : Any = """""".join(
bin(B64_CHARSET.index(lowerCAmelCase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowercase : Any = """""".join(
bin(B64_CHARSET.index(lowerCAmelCase_ ) )[2:].zfill(6 ) for char in encoded_data )
__lowercase : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase_ ) , 8 )
]
return bytes(lowerCAmelCase_ )
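# Worked example (the inverse of the encoder above): "YQ==" has padding == 2, so the
# 6-bit stream of "YQ" is "011000" + "010000" with the trailing padding * 2 = 4 bits
# dropped, leaving "01100001" -> bytes([0b01100001]) == b"a".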
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def snake_case_ ( lowerCAmelCase_ : ndarray ):
return np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , *,
__a : float = np.inf , __a : str = "linear" , __a : float = 0.0 , ) -> None:
"""simple docstring"""
__lowercase : Any = regularization
__lowercase : List[str] = gamma
if kernel == "linear":
__lowercase : Dict = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
__lowercase : Optional[Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: default gamma = 1 / (n_features * X.var())
# previously it was 1/(n_features)
else:
__lowercase : List[Any] = F"Unknown kernel: {kernel}"
raise ValueError(__a )
def lowerCAmelCase ( self : int , vectora : ndarray , vectorb : ndarray ) -> float:
"""simple docstring"""
return np.dot(vectora , vectorb )
def lowerCAmelCase ( self : str , vectora : ndarray , vectorb : ndarray ) -> float:
"""simple docstring"""
return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
def lowerCAmelCase ( self : Optional[int] , __a : list[ndarray] , __a : ndarray ) -> None:
"""simple docstring"""
__lowercase : List[Any] = observations
__lowercase : Union[str, Any] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector of Lagrange multipliers
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__lowercase) , ) : Union[str, Any] = np.shape(__a )
def to_minimize(__a : ndarray ) -> float:
__lowercase : str = 0
((__lowercase) , ) : Tuple = np.shape(__a )
for i in range(__a ):
for j in range(__a ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__a )
__lowercase : Tuple = LinearConstraint(__a , 0 , 0 )
__lowercase : List[Any] = Bounds(0 , self.regularization )
__lowercase : Dict = minimize(
__a , np.ones(__a ) , bounds=__a , constraints=[ly_contraint] ).x
__lowercase : str = l_star
# calculating mean offset of separation plane to points
__lowercase : Optional[Any] = 0
for i in range(__a ):
for j in range(__a ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
__lowercase : Any = s / n
def lowerCAmelCase ( self : Any , __a : ndarray ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __a )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
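# A tiny usage sketch (hypothetical: this file's renaming makes the class and its
# public methods share one name, so `SVC`, `fit` and `predict` below stand in for
# the original identifiers):
#
# import numpy as np
# xs = [np.array([0.0]), np.array([1.0])]
# ys = np.array([-1, 1])
# svc = SVC(regularization=10.0, kernel="linear")
# svc.fit(xs, ys)
# assert svc.predict(np.array([0.9])) == 1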
| 149
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase__ ( a_ : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(a_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(a_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
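# Worked example: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the prefix sums turn row 0
# into [1, 4, 5] and column 0 into [1, 2, 6]; filling the remaining cells gives a
# bottom-right value of 7 (path 1 -> 3 -> 1 -> 1 -> 1). Note the matrix is updated
# in place.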
if __name__ == "__main__":
import doctest
doctest.testmod()
| 599
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowerCAmelCase__ ( a_ : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
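# e.g. "car race" -> Counter of "carrace" is {'c': 2, 'a': 2, 'r': 2, 'e': 1}; only
# one odd count, so sum(c % 2 for ...) == 1 < 2 and the letters can be rearranged
# (ignoring spaces) into the palindrome "racecar".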
def lowerCAmelCase__ ( a_ : str = "" ) -> bool:
if len(a_ ) == 0:
return True
UpperCAmelCase__ : int = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCAmelCase__ : dict[str, int] = {}
for character in lower_case_input_str:
UpperCAmelCase__ : Optional[Any] = character_freq_dict.get(a_ , 0 ) + 1
UpperCAmelCase__ : str = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCAmelCase__ ( a_ : str = "" ) -> None:
print('''\nFor string = ''' , a_ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(a_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(a_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
UpperCamelCase_ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCamelCase_ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 599
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class SCREAMING_SNAKE_CASE_ ( snake_case ):
__a : Optional[int] = '''levit'''
def __init__( self , lowercase=2_2_4 , lowercase=3 , lowercase=3 , lowercase=2 , lowercase=1 , lowercase=1_6 , lowercase=[1_2_8, 2_5_6, 3_8_4] , lowercase=[4, 8, 1_2] , lowercase=[4, 4, 4] , lowercase=[1_6, 1_6, 1_6] , lowercase=0 , lowercase=[2, 2, 2] , lowercase=[2, 2, 2] , lowercase=0.0_2 , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
__SCREAMING_SNAKE_CASE : Any = kernel_size
__SCREAMING_SNAKE_CASE : Optional[Any] = stride
__SCREAMING_SNAKE_CASE : List[str] = padding
__SCREAMING_SNAKE_CASE : str = hidden_sizes
__SCREAMING_SNAKE_CASE : int = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = depths
__SCREAMING_SNAKE_CASE : str = key_dim
__SCREAMING_SNAKE_CASE : Optional[Any] = drop_path_rate
__SCREAMING_SNAKE_CASE : int = patch_size
__SCREAMING_SNAKE_CASE : int = attention_ratio
__SCREAMING_SNAKE_CASE : Any = mlp_ratio
__SCREAMING_SNAKE_CASE : Any = initializer_range
__SCREAMING_SNAKE_CASE : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class SCREAMING_SNAKE_CASE_ ( snake_case ):
__a : List[str] = version.parse('''1.11''' )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ) -> float:
'''simple docstring'''
return 1e-4
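# A construction sketch (the two classes above share a name under this file's
# renaming; this refers to the first one, the model config, with the defaults from
# its __init__):
#
# cfg = SCREAMING_SNAKE_CASE_()
# assert cfg.hidden_sizes == [128, 256, 384]
# assert cfg.num_attention_heads == [4, 8, 12]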
| 158
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(lowercase ) , lowercase )
def _snake_case ( self ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowercase ) , x.transpose() ) )
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _snake_case ( self ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) )
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : Any = tf.constant(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) )
__SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
__SCREAMING_SNAKE_CASE : int = tf.constant(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _snake_case ( self ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : List[Any] = jnp.array(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , np.asarray(transpose(lowercase ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 , 5 )
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase , axes=(1, 2, 0) ) ) ) )
def _snake_case ( self ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.reshape(lowercase , (4, 3) ) ) )
__SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , np.reshape(lowercase , (1_2, 5) ) ) )
@require_torch
def _snake_case ( self ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) )
__SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 , 5 )
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , reshape(lowercase , (1_2, 5) ).numpy() ) )
@require_tf
def _snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : List[Any] = tf.constant(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) )
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
__SCREAMING_SNAKE_CASE : List[str] = tf.constant(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , reshape(lowercase , (1_2, 5) ).numpy() ) )
@require_flax
def _snake_case ( self ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : str = jnp.array(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.asarray(reshape(lowercase , (4, 3) ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
__SCREAMING_SNAKE_CASE : List[str] = jnp.array(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (1_2, 5) ) , np.asarray(reshape(lowercase , (1_2, 5) ) ) ) )
def _snake_case ( self ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowercase ) , np.squeeze(lowercase ) ) )
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.squeeze(lowercase , axis=2 ) ) )
@require_torch
def _snake_case ( self ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 3 , 4 )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) )
__SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 4 , 1 , 5 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) )
@require_tf
def _snake_case ( self ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(1 , 3 , 4 )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) )
__SCREAMING_SNAKE_CASE : List[str] = np.random.randn(1 , 4 , 1 , 5 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) )
@require_flax
def _snake_case ( self ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(1 , 3 , 4 )
__SCREAMING_SNAKE_CASE : int = jnp.array(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , np.asarray(squeeze(lowercase ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
__SCREAMING_SNAKE_CASE : Any = jnp.array(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.asarray(squeeze(lowercase , axis=2 ) ) ) )
def _snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.expand_dims(lowercase , axis=1 ) ) )
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) )
@require_tf
def _snake_case ( self ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : Any = tf.constant(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) )
@require_flax
def _snake_case ( self ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.asarray(expand_dims(lowercase , axis=1 ) ) ) )
| 158
| 1
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase :
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCamelCase =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
_UpperCamelCase =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : Any ) -> Any:
_UpperCamelCase =self.get_dummy_components()
_UpperCamelCase =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =inputs['''prompt''']
_UpperCamelCase =inputs['''generator''']
_UpperCamelCase =inputs['''num_inference_steps''']
_UpperCamelCase =inputs['''output_type''']
if "image" in inputs:
_UpperCamelCase =inputs['''image''']
else:
_UpperCamelCase =None
if "mask_image" in inputs:
_UpperCamelCase =inputs['''mask_image''']
else:
_UpperCamelCase =None
if "original_image" in inputs:
_UpperCamelCase =inputs['''original_image''']
else:
_UpperCamelCase =None
_UpperCamelCase , _UpperCamelCase =pipe.encode_prompt(UpperCamelCase__ )
# inputs with prompt converted to embeddings
_UpperCamelCase ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase =image
if mask_image is not None:
_UpperCamelCase =mask_image
if original_image is not None:
_UpperCamelCase =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCamelCase =pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_UpperCamelCase =self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =inputs['''generator''']
_UpperCamelCase =inputs['''num_inference_steps''']
_UpperCamelCase =inputs['''output_type''']
# inputs with prompt converted to embeddings
_UpperCamelCase ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase =image
if mask_image is not None:
_UpperCamelCase =mask_image
if original_image is not None:
_UpperCamelCase =original_image
_UpperCamelCase =pipe_loaded(**UpperCamelCase__ )[0]
_UpperCamelCase =np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
def UpperCamelCase__ ( self : int ) -> str:
_UpperCamelCase =self.get_dummy_components()
_UpperCamelCase =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_UpperCamelCase =self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =pipe_loaded(**UpperCamelCase__ )[0]
_UpperCamelCase =np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
| 271
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase :
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCamelCase =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
_UpperCamelCase =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : Any ) -> Any:
_UpperCamelCase =self.get_dummy_components()
_UpperCamelCase =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =inputs['''prompt''']
_UpperCamelCase =inputs['''generator''']
_UpperCamelCase =inputs['''num_inference_steps''']
_UpperCamelCase =inputs['''output_type''']
if "image" in inputs:
_UpperCamelCase =inputs['''image''']
else:
_UpperCamelCase =None
if "mask_image" in inputs:
_UpperCamelCase =inputs['''mask_image''']
else:
_UpperCamelCase =None
if "original_image" in inputs:
_UpperCamelCase =inputs['''original_image''']
else:
_UpperCamelCase =None
_UpperCamelCase , _UpperCamelCase =pipe.encode_prompt(UpperCamelCase__ )
# inputs with prompt converted to embeddings
_UpperCamelCase ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase =image
if mask_image is not None:
_UpperCamelCase =mask_image
if original_image is not None:
_UpperCamelCase =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCamelCase =pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_UpperCamelCase =self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =inputs['''generator''']
_UpperCamelCase =inputs['''num_inference_steps''']
_UpperCamelCase =inputs['''output_type''']
# inputs with prompt converted to embeddings
_UpperCamelCase ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase =image
if mask_image is not None:
_UpperCamelCase =mask_image
if original_image is not None:
_UpperCamelCase =original_image
_UpperCamelCase =pipe_loaded(**UpperCamelCase__ )[0]
_UpperCamelCase =np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
def UpperCamelCase__ ( self : int ) -> str:
_UpperCamelCase =self.get_dummy_components()
_UpperCamelCase =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_UpperCamelCase =self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =pipe_loaded(**UpperCamelCase__ )[0]
_UpperCamelCase =np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
| 271
| 1
|
'''simple docstring'''
from PIL import Image
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
def brightness(c : Any ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(brightness )
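# Worked example: brightness(c) simplifies to c + level, so with level = 100 a pixel
# of value 50 maps to 150. Image.point evaluates the function once per possible input
# value to build a lookup table, then applies it to the whole image.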
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
__lowerCAmelCase : str = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 262
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase =False
class lowerCAmelCase__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase_ ( self ):
'''simple docstring'''
A__ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
A__ = torch.manual_seed(0 )
A__ = pipe(
image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
A__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 337
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_lowercase = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['DPTFeatureExtractor']
_lowercase = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
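# with the defaults above (image_size=30, patch_size=2) that is (30 // 2) ** 2 = 225
# patches, so seq_length == 226 once the [CLS] token is counted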
def UpperCamelCase ( self ) -> int:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> int:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
snake_case = TFViTModel(config=A__ )
snake_case = model(A__ , training=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
snake_case = self.type_sequence_label_size
snake_case = TFViTForImageClassification(A__ )
snake_case = model(A__ , labels=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = TFViTForImageClassification(A__ )
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
_UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def UpperCamelCase ( self ) -> List[Any]:
snake_case = TFViTModelTester(self )
snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ) -> int:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ) -> str:
pass
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self ) -> List[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(A__ )
snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> Any:
snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) -> Any:
snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self ) -> Optional[int]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ) -> Dict:
snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=A__ , return_tensors='''tf''' )
# forward pass
snake_case = model(**A__ )
# verify the logits
snake_case = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A__ )
snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 0
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
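# The tester above follows the standard transformers testing pattern: it builds one
# tiny random config/input set and then exercises every head-specific model class
# against it in the create_and_check_* methods consumed by the test case below.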
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        # Each node of the trie is a dict holding its character, child indices,
        # failure link, and the keywords that end at (or pass through) it.
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
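# Usage sketch (hypothetical keyword set, mirroring the classic Aho-Corasick example):
# >>> Automaton(["he", "she", "his", "hers"]).search_in("ahishers")
# {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}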
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing/accent options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
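# Illustrative sketch (hypothetical token ids, not from the original module): for a
# sequence pair the helpers above produce `[CLS] A [SEP] B [SEP]` with segment id 0
# for the first segment and 1 for the second, e.g. with token_ids_0=[5, 6] and
# token_ids_1=[8]:
#   build_inputs_with_special_tokens([5, 6], [8]) -> [cls_id, 5, 6, sep_id, 8, sep_id]
#   create_token_type_ids_from_sequences([5, 6], [8]) -> [0, 0, 0, 0, 1, 1]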
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))


# Note: these helpers intentionally shadow the built-in `map`/`filter` names so the
# timing keys read naturally; each simply times the corresponding Dataset method.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # Attention probabilities are not comparable between the Flax and PyTorch
        # BigBird implementations, so skip that part of the comparison.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
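# Standalone illustration of the JIT-equality pattern used above (a sketch, not part
# of the original test file): jitted and eager executions of the same function should
# agree on output shapes.
# import jax, jax.numpy as jnp
# fn = jax.jit(lambda x: x * 2)
# jitted = fn(jnp.ones((2, 3)))
# with jax.disable_jit():
#     eager = fn(jnp.ones((2, 3)))
# assert jitted.shape == eager.shape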
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
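# Minimal usage sketch (hypothetical file name; the "parquet" packaged module resolves
# to the builder above, streaming `batch_size` rows per yielded Arrow table):
# import datasets
# ds = datasets.load_dataset("parquet", data_files={"train": "data.parquet"}, split="train")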
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
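# Illustrative note: instantiating the deprecated class still works but emits a
# FutureWarning, e.g.
# feature_extractor = CLIPFeatureExtractor()  # warns, then behaves like CLIPImageProcessor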
from __future__ import annotations
# Sieve of Eratosthenes up to one million.
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
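# Sanity check: for the default limit of 1,000,000 this should report 55 circular
# primes (the expected answer to Project Euler problem 35).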
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b) -> np.ndarray:
    # Pairwise squared distances via the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters) -> np.ndarray:
    # Assign each RGB pixel to its nearest cluster center.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
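# Sanity sketch (illustrative values, not from the original module): the expansion
# should agree with a direct computation, e.g.
# squared_euclidean_distance(np.array([[0.0, 0.0, 0.0]]), np.array([[1.0, 2.0, 2.0]]))
# -> array([[9.]])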
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor: images are resized, normalized to [-1, 1],
    and optionally color-quantized against a palette of cluster points.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
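# Quantization sanity sketch (hypothetical 2-color palette, not from the original module):
# clusters = np.array([[0, 0, 0], [255, 255, 255]])
# pixels = np.array([[[10, 10, 10], [250, 240, 245]]])  # one 1x2 RGB image
# color_quantize(pixels, clusters) -> array([0, 1])  # dark pixel -> cluster 0, light -> 1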
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
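# Worked examples (climbing 1 or 2 steps at a time):
# climb_stairs(3) -> 3  (1+1+1, 1+2, 2+1)
# climb_stairs(4) -> 5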
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ..utils import DummyObject, requires_backends
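# Each placeholder class below relies on `requires_backends`: importing the module
# succeeds even when torch is absent, while instantiating any of these objects (or
# calling their classmethod constructors) raises an informative ImportError that
# tells the user to install torch.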
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Tuple = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Tuple = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
# NOTE: from this point the original file repeated the identical placeholder
# pattern once per torch-backed class. The obfuscated dump collapsed every one
# of those class names to the same `SCREAMING_SNAKE_CASE` placeholder, so the
# individual names are unrecoverable; a single cleaned-up copy of the shared
# template is kept below in place of the duplicated definitions. The metaclass
# is written as `DummyObject`, following the transformers convention (the dump
# called it `__lowercase`). Each dummy raises an informative error via
# `requires_backends` when instantiated or loaded without torch installed.
class TorchDummyObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
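# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original file): a minimal
# version of the `requires_backends` / `DummyObject` machinery the dummies
# above rely on. The names and behavior are assumptions modeled on the
# transformers convention, not the library's exact implementation, so the
# sketch is left commented out to avoid shadowing the real helpers.
#
#   import importlib.util
#
#   def requires_backends(obj, backends):
#       name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
#       missing = [b for b in backends if importlib.util.find_spec(b) is None]
#       if missing:
#           raise ImportError(f"{name} requires the following backends: {missing}")
#
#   class DummyObject(type):
#       # Metaclass: any non-underscore attribute access on the class re-raises
#       # the informative ImportError instead of a bare AttributeError.
#       def __getattribute__(cls, key):
#           if key.startswith("_"):
#               return super().__getattribute__(key)
#           requires_backends(cls, cls._backends)
# ---------------------------------------------------------------------------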
"""In-place radix sort (least-significant-digit first) for non-negative integers."""
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort ``list_of_ints`` in place and return it.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per possible digit value
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets according to the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
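
# Illustrative usage (added; not part of the original module): radix_sort
# mutates its argument and returns the same list object, so both the return
# value and the original binding end up sorted.
if __name__ == "__main__":
    example = [170, 45, 75, 90, 802, 24, 2, 66]
    assert radix_sort(example) is example
    assert example == sorted([170, 45, 75, 90, 802, 24, 2, 66])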
"""Deprecated feature-extractor alias for OwlViT, kept for backwards compatibility."""
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
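
# Illustrative usage (added; not part of the original module): instantiating
# the deprecated class still works but emits a FutureWarning, so old code keeps
# running while users migrate to OwlViTImageProcessor. The checkpoint name is
# only an example.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = OwlViTFeatureExtractor.from_pretrained("google/owlvit-base-patch32")
#   assert any(issubclass(w.category, FutureWarning) for w in caught)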
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
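
# Illustrative usage (added; not part of the original module): constructing a
# small X-MOD config and reading back the adapter settings. The values are
# arbitrary example numbers.
#
#   config = XmodConfig(num_hidden_layers=2, languages=("en_XX", "de_DE"))
#   assert config.model_type == "xmod"
#   assert config.adapter_reduction_factor == 2
#   assert config.languages == ["en_XX", "de_DE"]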
"""Training script used to check that Accelerate + DeepSpeed checkpoints can be saved and resumed correctly."""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
            json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
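
# Illustrative invocation (added; not part of the original script). The flags
# below are examples only; a DeepSpeed setup would normally be selected via
# `accelerate config` before launching.
#
#   accelerate launch --num_processes 2 this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts
#   accelerate launch this_script.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0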
"""Atbash cipher: maps A<->Z, B<->Y, and so on, leaving other characters unchanged."""
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the running times of the two implementations."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
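
# Illustrative check (added; not part of the original module): atbash is an
# involution, so applying it twice returns the original text, and the two
# implementations agree on every printable character.
if __name__ == "__main__":
    from string import printable

    assert atbash(atbash(printable)) == printable
    assert atbash(printable) == atbash_slow(printable)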
"""Testing suite for the PyTorch YOLOS model."""
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as YOLOS does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    # flag names reconstructed following the usual vision-model test layout
    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))

import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule described by rules such as "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
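
# Illustrative usage (added; not part of the original module): building a
# cosine schedule with warmup for a toy model. The numbers are arbitrary and
# `import torch` would be needed, which this module does not do itself.
#
#   import torch
#   model = torch.nn.Linear(2, 2)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for _ in range(10):
#       optimizer.step()
#       lr_scheduler.step()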
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # The expected encoding lives in the module-level constant defined below,
        # kept verbatim from the original file.
        self.tokenizer_integration_test_util(
            expected_encoding=EXPECTED_ENCODING,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


# fmt: off
# Expected encoding for `test_tokenizer_integration` above; the literal on the
# next line is kept verbatim from the original file.
SCREAMING_SNAKE_CASE = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name="""google/bigbird-pegasus-large-arxiv""" ,revision="""ba85d0851d708441f91440d509690f1ab6353415""" ,)
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Tuple = PegasusTokenizer
__snake_case : Optional[Any] = PegasusTokenizerFast
__snake_case : Union[str, Any] = True
__snake_case : Dict = True
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = PegasusTokenizer(lowerCamelCase__ ,offset=0 ,mask_token_sent=lowerCamelCase__ ,mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,**lowerCamelCase__ : str ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] ,return_tensors=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ).input_ids[0]
SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] ,return_tensors=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ).input_ids[0]
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["""This is going to be way too long.""" * 1000, """short example"""]
SCREAMING_SNAKE_CASE = ["""not super long but more than 5 tokens""", """tiny"""]
SCREAMING_SNAKE_CASE = self._large_tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = self._large_tokenizer(
text_target=lowerCamelCase__ ,max_length=5 ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase__ ) == 2 # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 116
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Hash the given source lines, ignoring comments and blank lines."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
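

# Example (illustrative, not part of the original module): full-line comments and
# blank lines do not affect the hash, so
#   _hash_python_lines(["x = 1", "# a comment", "", "y = 2"])
# equals
#   _hash_python_lines(["x = 1", "y = 2"])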
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE_ = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 116
| 1
|
def is_palindrome_number(num: int) -> bool:
    """Return True if ``num`` is equal to the reversal of its own digits.

    >>> is_palindrome_number(121)
    True
    >>> is_palindrome_number(-121)
    False
    >>> is_palindrome_number(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 491
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        # is_decoder=False and use_stable_embedding=True follow the upstream Open-Llama test setup.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def __A ( self ):
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 491
| 1
|
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
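

# Example (illustrative): for array [1, 2, 3] the prefix sums are [1, 3, 6];
# get_sum(1, 2) returns 5 (2 + 3) and contains_sum(5) is True (subarray [2, 3]).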
if __name__ == "__main__":
import doctest
doctest.testmod()
| 393
|
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Prepare and measure an entangled state on ``qubits`` qubits."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
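

# Example (illustrative): with qubits=2 the circuit prepares the Bell state
# (|00> + |11>)/sqrt(2), so the 1000 shots split roughly evenly between the
# '00' and '11' outcomes, e.g. {'00': 497, '11': 503}.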
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
| 393
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
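
    # Usage (illustrative): processor(text=["a cat"], images=image) yields input_ids,
    # attention_mask and pixel_values; processor(visual_prompt=prompt_img, images=image)
    # yields pixel_values and conditional_pixel_values instead.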
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning,
        )
        return self.image_processor
| 25
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta before summing the series so it converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
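

# Example (illustrative): maclaurin_sin(pi / 2) is approximately 1.0 and
# maclaurin_cos(pi) is approximately -1.0, matching math.sin and math.cos.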
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 25
| 1
|
'''simple docstring'''
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 711
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ : str = """src/diffusers"""
a_ : int = """."""
# This is to make sure the diffusers module imported is the one in the repo.
a_ : int = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ : List[Any] = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
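# Example (illustrative): a line like
#   "    # Copied from diffusers.models.attention.Attention.forward with Attention->CrossAttention"
# is matched by _re_copy_warning, and the trailing "Attention->CrossAttention"
# part is parsed by _re_replace_pattern.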
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 445
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Optional[int] , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :str ) -> None:
'''simple docstring'''
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 653
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case_ : str = [1, 2, 3]
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
with pytest.raises(__magic_name__ ):
with parallel_backend("unsupported backend" ):
map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = [1, 2]
snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
snake_case_ : Tuple = [2, 3]
snake_case_ : str = {"a": 2, "b": 3}
snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
| 653
| 1
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def __snake_case ( self ):
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A__ : List[str] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
A__ : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ : Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ : Optional[int] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
A__ : str = pt_inputs['input_ids'].shape
A__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
A__ : List[str] = 0
A__ : Dict = 1
A__ : Optional[int] = 0
A__ : Optional[int] = 1
A__ : Optional[Any] = pt_model_class(UpperCAmelCase_ ).eval()
A__ : str = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
A__ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
A__ : Dict = fx_state
with torch.no_grad():
A__ : str = pt_model(**UpperCAmelCase_ ).to_tuple()
A__ : Optional[Any] = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
A__ : List[Any] = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
A__ : int = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __snake_case ( self ):
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A__ : int = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
A__ : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ : Optional[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
A__ : List[Any] = pt_model_class(UpperCAmelCase_ ).eval()
A__ : str = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
A__ : Optional[int] = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
A__ : Union[str, Any] = pt_inputs['input_ids'].shape
A__ : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
A__ : List[str] = 0
A__ : Optional[int] = 1
A__ : List[str] = 0
A__ : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A__ : List[Any] = pt_model(**UpperCAmelCase_ ).to_tuple()
A__ : Optional[Any] = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
A__ : int = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
A__ : Dict = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __snake_case ( self ):
for model_class_name in self.all_model_classes:
A__ : Optional[int] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
A__ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        # SAMPLE_PROCESSOR_CONFIG_DIR is the fixtures directory defined at the top of this test file
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)

            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
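# Added note (illustrative, not part of the original file): the `auto_map`
# entries asserted above are what make `trust_remote_code=True` loading work.
# After `save_pretrained`, the pushed repo's preprocessor config is expected
# to contain something like:
#
#     "auto_map": {
#         "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
#         "AutoProcessor": "custom_processing.CustomProcessor"
#     }
#
# so that AutoProcessor can locate the custom class inside the repo's own
# Python modules instead of inside the transformers library.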
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Improved pseudo numerical methods for diffusion models (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep combination of the stored network outputs, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # iPNDM does not rescale the input sample
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
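# ---------------------------------------------------------------------------
# Added usage sketch (illustrative, not part of the original file): the
# fourth-order branch in `step` combines the last four model outputs with the
# Adams-Bashforth coefficients (55, -59, 37, -9) / 24. A minimal denoising
# loop with this scheduler, assuming `model(sample, t)` returns an epsilon
# prediction of the same shape as `sample`, might look like:
#
#     scheduler = IPNDMScheduler()
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = model(scheduler.scale_model_input(sample, t), t)
#         sample = scheduler.step(model_output, t, sample).prev_sample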
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array([-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array([-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array([-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"))
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Newton-Laplace formula: c = sqrt(K / rho), with bulk modulus K in Pa and
    # density rho in kg/m^3, giving the speed of sound in m/s.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
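# Added worked example (values approximate, for illustration): water at room
# temperature has bulk_modulus ~ 2.15e9 Pa and density ~ 998 kg/m^3, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) is about
# 1468 m/s, close to the measured ~1480 m/s.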
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page replacement cache: evicts the least recently used key first."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # evict the least recently used key (the back of the deque)
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
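# Added walk-through (illustrative): with capacity 4, the refer() calls above
# keep the most recently used key at the front of the deque. Referring "A" a
# second time refreshes it rather than duplicating it, and inserting 5 into
# the full cache evicts 2 (the least recently used key), which is why the
# final state is [5, 4, 'A', 3].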
"""Wav2Vec2 model configuration."""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV2VEC2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
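# Added note (illustrative): `inputs_to_logits_ratio` is the product of the
# feature-encoder strides. With the default conv_stride (5, 2, 2, 2, 2, 2, 2)
# the ratio is 5 * 2**6 = 320, i.e. one output frame per 320 raw audio
# samples -- 20 ms of audio at the usual 16 kHz sampling rate.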
"""Convert TrOCR checkpoints from the unilm repository."""

import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
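# Added note (illustrative): the original DeiT checkpoint stores the attention
# projections as a single fused matrix of shape (3 * hidden_size, hidden_size).
# The three equal row-blocks sliced off above become the separate query, key
# and value weights expected by the HuggingFace ViT encoder.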
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg"  # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg"  # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into the VisionEncoderDecoderModel structure."""
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.cross_attention_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.cross_attention_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def lowerCamelCase ( UpperCamelCase : Dict ) -> List[str]:
if not _run_slow_tests or _run_slow_tests == 0:
_lowerCamelCase = unittest.skip('test is slow' )(_A )
return test_case
def lowerCamelCase ( UpperCamelCase : Dict ) -> List[str]:
if not _run_local_tests or _run_local_tests == 0:
_lowerCamelCase = unittest.skip('test is local' )(_A )
return test_case
def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Dict:
if not _run_packaged_tests or _run_packaged_tests == 0:
_lowerCamelCase = unittest.skip('test is packaged' )(_A )
return test_case
def lowerCamelCase ( UpperCamelCase : Tuple ) -> Dict:
if not _run_remote_tests or _run_remote_tests == 0:
_lowerCamelCase = unittest.skip('test requires remote' )(_A )
return test_case
def for_all_test_methods(*decorators):
    """Apply each of the given decorators to every `test*` method of a class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
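

# Illustrative usage (hypothetical test class, not part of the original utilities):
# decorate every test method of a TestCase in one go.
@for_all_test_methods(local)
class _ExampleDecoratedTests(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)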
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment, in one of the `OfflineSimulationMode` modes."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.'
            )
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
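

# Illustrative usage (a sketch, not part of the original utilities): any HTTP call made
# inside the block fails immediately instead of hitting the network.
def _offline_usage_example():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get('https://huggingface.co')
        except requests.ConnectionError:
            return True
    return False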
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
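

# Illustrative usage (sketch): building a pyarrow table inside the block allocates Arrow
# memory, which the context manager asserts on exit.
def _arrow_memory_usage_example():
    with assert_arrow_memory_increases():
        table = pa.table({'col': [1, 2, 3]})
    return table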
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=''):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f'The combined stderr from workers follows:\n{stderr}'
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the pytest-xdist worker, or 0 if pytest-xdist isn't being used."""
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port offset by the pytest-xdist worker id so concurrent workers don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
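

# Illustrative usage (sketch): run a command and capture its output; the helper raises if
# the command fails or produces no output at all.
def _subprocess_usage_example():
    result = execute_subprocess_async([sys.executable, '-c', "print('hello')"], env=os.environ.copy())
    return result.stdout  # -> ['hello']
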
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for the given number of nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees for the given number of nodes."""
    return catalan_number(node_count) * factorial(node_count)
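

# Worked example (illustrative): for 5 nodes, C(10, 5) = 252, so the Catalan number is
# 252 // 6 = 42 binary search trees, and there are 42 * 5! = 5040 binary trees.
assert binomial_coefficient(10, 5) == 252
assert catalan_number(5) == 42
assert binary_tree_count(5) == 5040
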
if __name__ == "__main__":
_lowerCAmelCase = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f'\n{mode.title()}ed message:')
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return ''.join(translated)
if __name__ == "__main__":
main()
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'

SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '[MASK]')
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type='original_full')
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
        decoded_text = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids)

        self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]')
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [
                [65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66],  # noqa: E501
                [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66] + [0] * 63,  # noqa: E501
                [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66] + [0] * 84,
            ],
            'attention_mask': [
                [1] * 96,
                [1] * 33 + [0] * 63,
                [1] * 12 + [0] * 84,
            ],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='google/bigbird-roberta-base',
            revision='215c99f1600e06f83acce68422f2035b2b5c3510',
        )
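

# Illustrative usage (a sketch) of the tokenizer exercised above: encoded sequences start
# with [CLS] (id 65) and end with [SEP] (id 66), as the integration tests check.
#
#   tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
#   tokenizer('Paris is the [MASK].').input_ids  # -> [65, ..., 66]
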
"""simple docstring"""
from typing import List
import numpy as np
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : int = {key: len(__UpperCamelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
__lowercase : int = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCamelCase )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[int] = []
for group_idx in range(__UpperCamelCase ):
__lowercase : Optional[int] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__lowercase : Any = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__lowercase : int = range(__UpperCamelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCamelCase )
return shards_indices_per_group
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : str = _number_of_shards_in_gen_kwargs(__UpperCamelCase )
if num_shards == 1:
return [dict(__UpperCamelCase )]
else:
__lowercase : List[str] = _distribute_shards(num_shards=__UpperCamelCase , max_num_jobs=__UpperCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCamelCase , __UpperCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCamelCase ) )
]
def __UpperCAmelCase ( __UpperCamelCase ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : str = {len(__UpperCamelCase ) for value in gen_kwargs.values() if isinstance(__UpperCamelCase , __UpperCamelCase )}
__lowercase : Union[str, Any] = {}
for size in list_sizes:
__lowercase : List[Any] = list(range(__UpperCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase : str = dict(__UpperCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Dict = [value[i] for i in indices_per_size[len(__UpperCamelCase )]]
return shuffled_kwargs
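

# Illustrative examples (not part of the original module): 5 shards distributed over 2
# jobs gives 3 + 2 contiguous shards, and only list values get sharded.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _split_gen_kwargs({'files': ['a', 'b', 'c'], 'mode': 'r'}, max_num_jobs=3) == [
    {'files': ['a'], 'mode': 'r'},
    {'files': ['b'], 'mode': 'r'},
    {'files': ['c'], 'mode': 'r'},
]
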
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs`, returning one random value per epoch."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1'))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; only the 2 most recent checkpoints should be kept:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10')))

    @require_cuda
    def test_map_location(self):
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase__ = '/tmp/accelerate/state_checkpointing'
UpperCamelCase__ = DummyModel()
UpperCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase__ , UpperCamelCase__ = dummy_dataloaders()
UpperCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
UpperCamelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
UpperCamelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
UpperCamelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
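

# Minimal save/load sketch (illustrative, mirroring the pattern exercised by the tests above):
#
#   project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
#   accelerator = Accelerator(project_dir='runs', project_config=project_config)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state()                                  # -> runs/checkpoints/checkpoint_0
#   accelerator.load_state('runs/checkpoints/checkpoint_0')   # restores model/optimizer/RNG state
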
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return 'pipe'

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}'
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser('run', help='Run a pipeline through the CLI')
        run_parser.add_argument('--task', choices=get_supported_tasks(), help='Task to run')
        run_parser.add_argument('--input', type=str, help='Path to the file to use for inference')
        run_parser.add_argument('--output', type=str, help='Path to the file that will be used post to write results.')
        run_parser.add_argument('--model', type=str, help='Name or path to the model to instantiate.')
        run_parser.add_argument('--config', type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            '--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)'
        )
        run_parser.add_argument(
            '--column',
            type=str,
            help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)',
        )
        run_parser.add_argument(
            '--format',
            type=str,
            default='infer',
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help='Input format to read from',
        )
        run_parser.add_argument(
            '--device',
            type=int,
            default=-1,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)',
        )
        run_parser.add_argument('--overwrite', action='store_true', help='Allow overwriting the output file.')
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}')
        else:
            self._reader.save(outputs)
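

# Illustrative invocation (hypothetical file names), matching the arguments registered above:
#
#   transformers-cli run --task sentiment-analysis --input reviews.csv --column text \
#       --format csv --output predictions.json
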
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function='gelu',
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
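

# Illustrative usage (a sketch): a smaller decoder configuration for quick experiments,
# overriding only a few of the defaults defined above.
tiny_trocr_config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=4, decoder_ffn_dim=1024)
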
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={'help': "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    mask_patch_size: int = field(default=32, metadata={'help': 'The size of the square patches to use for masking.'})
    mask_ratio: float = field(
        default=0.6,
        metadata={'help': 'Percentage of patches to mask.'},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={'help': 'Stride to use for the encoder.'},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor where the value is either 0 or 1, and 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {'pixel_values': pixel_values, 'bool_masked_pos': mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mim', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, 'decoder_type'):
        config.decoder_type = 'simmim'

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            'image_size': model_args.image_size,
            'patch_size': model_args.patch_size,
            'encoder_stride': model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif 'image' in column_names:
        image_column_name = 'image'
    elif 'img' in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if 'train' not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds['train'].set_transform(preprocess_images)

    if training_args.do_eval:
        if 'validation' not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds['validation'].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['train'] if training_args.do_train else None,
        eval_dataset=ds['validation'] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'masked-image-modeling',
        'dataset': data_args.dataset_name,
        'tags': ['masked-image-modeling'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
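

# Illustrative invocation (a sketch; the flags follow the argument classes defined above,
# and the model type choice is hypothetical):
#
#   python run_mim.py --model_type vit --dataset_name cifar10 \
#       --output_dir ./simmim-pretrain --do_train --do_eval --overwrite_output_dir
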
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__a = ZeroShotClassificationPipeline(
model=UpperCamelCase , tokenizer=UpperCamelCase , candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
def UpperCamelCase__ ( self , UpperCamelCase ) -> Optional[int]:
__a = zero_shot_classifier.model.config
__a = config.labelaid
__a = zero_shot_classifier.entailment_id
__a = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__a = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__a = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__a = original_labelaid
self.assertEqual(UpperCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCamelCase__ ( self ) -> Tuple:
__a = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def UpperCamelCase__ ( self ) -> List[str]:
__a = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
__a = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@require_tf
def UpperCamelCase__ ( self ) -> Dict:
__a = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
__a = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def UpperCamelCase__ ( self ) -> List[str]:
__a = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
__a = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
__a = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
| 539
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50_267 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ) -> int:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                'The config can simply be saved and uploaded again to be fixed.' )
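# Illustrative note (a sketch, not from the original file): instantiating the config above with
# no arguments reproduces the MVP base hyper-parameters, e.g.
# config = MvpConfig()
# (config.d_model, config.encoder_layers, config.encoder_attention_heads)  # -> (1024, 12, 16)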
| 167
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
__A = [file for file in filepaths if " " in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
__A = [file for file in filepaths if "-" in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
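# Illustrative example (not part of the original script): a file named "My Bad-Script.py" at the
# repository root would be counted in upper_files, space_files, hyphen_files and nodir_files,
# so the script would exit with a non-zero status of 4.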
| 167
| 1
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 210
|
'''simple docstring'''
def pancake_sort(arr: list) -> list:
    '''simple docstring'''
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole prefix of length cur
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
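# Quick illustrative check (not part of the original script): pancake_sort([3, 1, 5, 2, 4])
# returns [1, 2, 3, 4, 5]. Each pass flips the current maximum to the front and then into its
# final position, so at most 2 * (n - 1) prefix reversals are performed.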
| 210
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )

    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def set_src_lang_special_tokens(self, src_lang) -> None:
        '''Reset the special tokens to the source lang setting: no prefix and suffix=[eos, src_lang_code].'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        '''Reset the special tokens to the target lang setting: no prefix and suffix=[eos, tgt_lang_code].'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
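# Illustrative usage sketch (checkpoint id taken from the pretrained map above):
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# tok("Hello").input_ids  # source sequences are wrapped as [tokens..., </s>, en_XX code]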
| 69
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__( self , generator: Callable , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , gen_kwargs: Optional[dict] = None , num_proc: Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )

    def read(self):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 69
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
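# Illustrative usage sketch (model id taken from the pretrained map above):
# tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# tok("hello world").input_ids  # -> [CLS] hello world [SEP], per build_inputs_with_special_tokens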
| 271
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase_ ( lowercase: str = "" ) -> dict[str, float]:
'''simple docstring'''
_UpperCamelCase: Tuple = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_UpperCamelCase: Union[str, Any] = BeautifulSoup(requests.get(lowercase ).text , '''html.parser''' )
_UpperCamelCase: List[Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
_UpperCamelCase: str = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase , lowercase )
}
def lowerCAmelCase_ ( lowercase: str = "IMDb_Top_250_Movies.csv" ) -> None:
'''simple docstring'''
_UpperCamelCase: Any = get_imdb_top_aaa_movies()
with open(lowercase , '''w''' , newline='''''' ) as out_file:
_UpperCamelCase: Optional[Any] = csv.writer(lowercase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
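# Note (illustrative): the scraper depends on IMDb's legacy chart markup ("titleColumn" /
# "ratingColumn imdbRating" table cells); if the page layout changes, find_all() returns empty
# lists and write_movies() emits a CSV containing only the header row.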
| 271
| 1
|
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray):
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (top-left anchored zero padding)
    image_padded[: image.shape[0], : image.shape[1]] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
A_ = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
A_ = np.array(Image.open(lena_path))
# kernel to be applied
A_ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
A_ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
A_ = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 710
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ['''pixel_values''']

    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , crop_pct: float = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image: np.ndarray , size: Dict[str, int] , crop_pct: float , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , crop_pct: float = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 465
| 0
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: returns all primes below max_number.
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2 , max_number , i):
                is_prime[j] = False
    return [i for i in range(2 , max_number) if is_prime[i]]
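# Illustrative check (not in the original): calculate_prime_numbers(10) -> [2, 3, 5, 7]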
def solution(base: int = 80_0800 , degree: int = 80_0800) -> int:
    # Count hybrid integers p**q * q**p <= base**degree via a two-pointer sweep over the primes.
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path: str):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict(d , config , rename_keys_prefix=rename_keys_prefix ):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path: str , pytorch_dump_folder_path: str ):
    """simple docstring"""
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )

    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
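# Illustrative CLI sketch (checkpoint file name taken from ACCEPTABLE_CHECKPOINTS above; the
# script file name is hypothetical):
# python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa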
| 44
| 0
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0_0_0_3
PYTHON_CODE = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_base_tokenizer(self ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
_lowerCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
        code = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
    def test_full_multi_tokenizer(self ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''multi''' , keep_accents=True )
_lowerCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
        code = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase ):
    checkpoint_name = '''uclanlp/plbart-python-en_XX'''
    src_text = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
    tgt_text = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
    expected_src_tokens = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass(cls ):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
    def test_python_en_tokenizer_batch_encode_plus(self ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_python_en_tokenizer_decode_ignores_language_codes(self ):
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_python_en_tokenizer_truncation(self ):
        src_text = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
        self.assertIsInstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , PYTHON_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token(self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
    def test_special_tokens_unaffacted_by_save_load(self ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
    def test_batch_fairseq_parity(self ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
    def test_python_en_tokenizer_prepare_batch(self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def test_seq2seq_max_length(self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''' )
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self ):
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
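# Hedged sketch of the `shift_tokens_right` helper exercised above (an
# assumption based on the mBART-style variant, which moves the trailing
# language code to position 0 of the decoder inputs):
#
#   def shift_tokens_right(input_ids, pad_token_id):
#       prev_output_tokens = input_ids.clone()
#       index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
#       decoder_start_tokens = input_ids.gather(1, index_of_eos).squeeze()
#       prev_output_tokens[:, 1:] = input_ids[:, :-1]
#       prev_output_tokens[:, 0] = decoder_start_tokens
#       return prev_output_tokens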
| 664
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
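# Hedged usage sketch: with the lazy-module pattern above, importing a symbol
# only materializes the heavy submodule on first attribute access, e.g.
# (checkpoint id is illustrative):
#
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")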
| 664
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig ( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase = 'deta'
__UpperCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__( self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ =CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
            if isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
@property
    def num_attention_heads( self ) -> int:
return self.encoder_attention_heads
@property
    def hidden_size( self ) -> int:
return self.d_model
    def to_dict( self ) -> Dict:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
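# Minimal usage sketch for the config above (values are hypothetical; assumes
# the standard `PretrainedConfig` save/load API):
#
#   config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
#   config.save_pretrained("./deta-config")          # writes config.json
#   reloaded = DetaConfig.from_pretrained("./deta-config")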
| 625
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig ( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase = 'roformer'
    def __init__( self, vocab_size=5_0000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
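# Worked example of the dynamic axes above (an illustration, not part of the
# upstream file): with task="multiple-choice", input_ids/attention_mask/
# token_type_ids each get {0: "batch", 1: "choice", 2: "sequence"}, so a graph
# exported with input_ids of shape (2, 4, 128) still accepts e.g. (8, 2, 64)
# at run time; for other tasks only the batch and sequence axes stay dynamic.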
| 625
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
__a = True
__a = None
# Automatically constructed
__a = "PIL.Image.Image"
__a = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
__a = field(default="""Image""" , init=A , repr=A )
def __call__( self : int ):
'''simple docstring'''
return self.pa_type
    def encode_example( self , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self , value : dict , token_per_repo_id = None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install \'Pillow\'.""" )
if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
    def flatten( self ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage( self , storage : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
'''simple docstring'''
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage : pa.StructArray ):
'''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats( ) -> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image ) -> bytes:
    '''simple docstring'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image( image ) -> dict:
    '''simple docstring'''
    if hasattr(image , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array ) -> dict:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ) -> List[dict]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
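# Hedged usage sketch of this feature through the public `datasets` API
# (the file path is a placeholder):
#
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": ["path/to/img.png"]}).cast_column("image", Image())
#   ds[0]["image"]   # decoded lazily into a PIL.Image.Image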
| 718
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ):
        '''simple docstring'''
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
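# Hedged usage sketch for the pipeline above (assumes pre-built `unet` and
# `scheduler` components; the pipeline class name here is illustrative):
#
#   pipe = MyPipeline(unet=unet, scheduler=scheduler)
#   images = pipe(batch_size=1, num_inference_steps=50, output_type="pil").images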
| 299
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type ( model_name_or_path ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths ( metric_fn, prediction, ground_truths ):
    """simple docstring"""
    return max(metric_fn(prediction, gt ) for gt in ground_truths )
def get_scores ( args, preds_path, gold_data_path ):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path, "r" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path, "r" ).readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths )
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths )
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(F'''F1: {f1:.2f}''' )
    logger.info(F'''EM: {em:.2f}''' )
def get_precision_at_k ( args, preds_path, gold_data_path ):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path, "r" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''' )
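# Worked example for the precision@k above (hypothetical provenance strings,
# k=2): hypo "Doc A\tDoc B" vs. reference "Doc B\tDoc C" share one title, so
# the sample contributes 1 / k = 0.5; the mean over all samples is scaled by
# 100 into the reported Precision@k.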
def evaluate_batch_retrieval ( args, rag_model, questions ):
    """simple docstring"""
    def strip_title(title ):
        if title.startswith("\"" ):
            title = title[1:]
        if title.endswith("\"" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e ( args, rag_model, questions ):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions, answers ):
                logger.info("Q: {} - A: {}".format(q, a ) )
        return answers
def get_args ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers" )
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def main ( args ):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints )
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(args, args.predictions_path, args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint ) )
        logger.info("  Batch size = %d", args.eval_batch_size )
        logger.info("  Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs )
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set, "r" ) as eval_file, open(args.predictions_path, "w" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions )
                    preds_file.write("\n".join(answers ) + "\n" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args, model, questions )
                preds_file.write("\n".join(answers ) )
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
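# Example invocation (paths are placeholders; the flags mirror the parser
# defined above, and the checkpoint id is the public RAG sequence model):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --eval_mode e2e --gold_data_mode qa \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --predictions_path predictions.txt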
| 584
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        -0.25\n'
_CITATION = '\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __lowerCAmelCase (datasets.Metric ):
'''simple docstring'''
def _a ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def _a ( self , a , a , a=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(a , a , sample_weight=a ) ),
}
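if __name__ == "__main__":
    # Hedged sanity check against sklearn directly; it mirrors Example 1 from
    # the docstring above, so the expected value is grounded in this file.
    from sklearn.metrics import matthews_corrcoef

    assert round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2) == 0.54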
| 584
| 1
|
"""simple docstring"""
from PIL import Image
def change_brightness( img , level ):
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
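# Quick check of the mapping (an illustration): with level=100 a mid-gray
# pixel c=128 maps to 128 + 100 + 0 = 228, and `Image.point` evaluates the
# function over a 256-entry lookup table for 8-bit modes, clipping results
# such as c=200 -> 300 into the valid 0-255 range.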
| 492
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class XLMRobertaTokenizer ( PreTrainedTokenizer ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase : str = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
_lowerCamelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
_lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
_lowerCamelCase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCamelCase : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[Any] = len(self.sp_model ) + self.fairseq_offset
_lowerCamelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
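        # Worked example of the offset (following the alignment table above):
        # spm gives "," id 3; adding fairseq_offset (1) yields 4, its fairseq
        # position, while ids 0-3 are served from fairseq_tokens_to_ids directly.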
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
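# Hedged usage sketch (the checkpoint id is the canonical upstream one; exact
# ids depend on the loaded SentencePiece model):
#
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok("Hello world")["input_ids"]   # -> [0, ..., 2], i.e. <s> ... </s>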
| 492
| 1
|
"""simple docstring"""
def triangle_number_generator ():
'''simple docstring'''
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def count_divisors ( n ):
'''simple docstring'''
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def solution ():
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
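# Worked example of the divisor count: 28 = 2^2 * 7, so count_divisors(28)
# returns (2 + 1) * (1 + 1) = 6 (divisors 1, 2, 4, 7, 14, 28). The first
# triangle number with more than 500 divisors is 76576500 (Project Euler 12).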
| 58
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=2_2_4 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
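# Hedged sketch of the shape contract the three tests above share (PIL, numpy
# and torch inputs must normalize to the same output; names are assumptions):
#
#   out = image_processor(image_inputs, return_tensors="pt").pixel_values
#   assert out.shape == (batch_size, num_channels, 18, 18)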
| 58
| 1
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
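        # Worked example with the defaults above (an illustration): image_size=64
        # and an output stride of 32 give a 2x2 feature map -> 4 patches, plus
        # the [CLS] token -> seq_length = 5.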
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
        backbone_config = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A = False
A = False
A = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :Tuple = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ :Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :List[str] = model_class(_lowerCAmelCase )
lowerCAmelCase__ :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :str = [*signature.parameters.keys()]
lowerCAmelCase__ :List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ :Tuple = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase__ :Any = model_class(config=_lowerCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase__ :List[str] = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :Optional[int] = ViTHybridModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img ():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_lowerCAmelCase )
lowerCAmelCase__ :Union[str, Any] = self.default_image_processor
lowerCAmelCase__ :List[Any] = prepare_img()
lowerCAmelCase__ :Any = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ :Tuple = model(**_lowerCAmelCase )
# verify the logits
lowerCAmelCase__ :Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowerCAmelCase__ :List[Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
lowerCAmelCase__ :Any = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
lowerCAmelCase__ :int = prepare_img()
lowerCAmelCase__ :Tuple = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
lowerCAmelCase__ :List[str] = model(**_lowerCAmelCase )
lowerCAmelCase__ :int = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase__ :str = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
| 111
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A = ShapEImgaImgPipeline
A = ['''image''']
A = ['''image''']
A = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A = False
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
'''simple docstring'''
return 8
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowerCAmelCase__ :int = CLIPVisionModel(_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = CLIPImageProcessor(
crop_size=224 , do_center_crop=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_resize=_lowerCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
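        # The mean/std above are the standard CLIP normalization constants
        # (~0.481/0.458/0.408 and ~0.269/0.261/0.276); resample=3 is PIL's BICUBIC filter.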
return image_processor
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Any = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
lowerCAmelCase__ :int = PriorTransformer(**_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase__ :str = ShapERenderer(**_lowerCAmelCase )
return model
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.dummy_prior
lowerCAmelCase__ :str = self.dummy_image_encoder
lowerCAmelCase__ :Optional[Any] = self.dummy_image_processor
lowerCAmelCase__ :Union[str, Any] = self.dummy_renderer
lowerCAmelCase__ :str = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1_024 , prediction_type="sample" , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
lowerCAmelCase__ :Any = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
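    # A minimal sketch of how these components fit together (mirrors the tests below):
    #   pipe = ShapEImgaImgPipeline(**self.get_dummy_components())
    #   out = pipe(**self.get_dummy_inputs("cpu"))   # 20 rendered 32x32 RGB views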
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCAmelCase__ :Dict = torch.manual_seed(_lowerCAmelCase )
else:
lowerCAmelCase__ :str = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
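        # mps lacks device-local generators at the time of writing, hence the
        # global-seed fallback in the branch above.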
lowerCAmelCase__ :Tuple = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = "cpu"
lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ :Union[str, Any] = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :int = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCAmelCase__ :List[Any] = output.images[0]
lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCAmelCase__ :str = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ):
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = torch_device == "cpu"
lowerCAmelCase__ :Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.get_dummy_components()
lowerCAmelCase__ :Tuple = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Tuple = 1
lowerCAmelCase__ :List[Any] = 2
lowerCAmelCase__ :List[str] = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase__ :Any = batch_size * [inputs[key]]
lowerCAmelCase__ :Optional[Any] = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
lowerCAmelCase__ :int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
lowerCAmelCase__ :Optional[int] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
lowerCAmelCase__ :Tuple = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Any = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
lowerCAmelCase__ :List[Any] = pipe(
_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 111
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = '''mobilenet_v1'''
    def __init__( self ,num_channels=3 ,image_size=224 ,depth_multiplier=1.0 ,min_depth=8 ,hidden_act="relu6" ,tf_padding=True ,classifier_dropout_prob=0.9_99 ,initializer_range=0.02 ,layer_norm_eps=0.0_01 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
| 36
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Tuple = '''vision-encoder-decoder'''
__lowerCamelCase : List[Any] = True
    def __init__( self ,**kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuration of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def snake_case_ ( cls ,encoder_config ,decoder_config ,**kwargs ):
        '''simple docstring'''
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**kwargs )
    def snake_case_ ( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs
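    # Axis 1 is labelled "past_decoder_sequence + sequence" so the exported decoder
    # accepts both a full first-step sequence and later single steps with cached context.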
    def snake_case_ ( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        '''simple docstring'''
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        batch, encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
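        # e.g. with batch_size=2 and encoder_hidden_size=768 the zeros tensor above has
        # shape (2, encoder_sequence, 768) and stands in for real encoder output.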
class _A ( snake_case ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
    def snake_case_ ( self ,encoder_config ,decoder_config ,feature = "default" ):
        '''simple docstring'''
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config ,feature )
| 36
| 1
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """hf-internal-testing/tiny-random-t5"""
UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
UpperCAmelCase : Dict = tokenizer("""This is me""" , return_tensors="""pt""" )
UpperCAmelCase : Tuple = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase : int = model.generate(**UpperCamelCase__ )
UpperCAmelCase : Optional[Any] = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase : int = model_reloaded.generate(**UpperCamelCase__ )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[int] = """hf-internal-testing/tiny-random-t5"""
UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
UpperCAmelCase : int = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase__ ):
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase : Any = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase__ )
| 710
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCAmelCase : int = PegasusTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
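        # ids 0/1 are <pad>/</s>, the reserved mask tokens follow, and ordinary
        # sentencepiece pieces are shifted up by offset=103 - hence the non-special
        # ids in the expected list below all sitting above 103.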
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = PegasusTokenizer
lowercase = PegasusTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        UpperCAmelCase : int = PegasusTokenizer(a , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 672
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 45
|
'''simple docstring'''
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_value, max_value = min(collection), max(collection)
        start.append(min_value)
        end.append(max_value)
        collection.remove(min_value)
        collection.remove(max_value)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
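# Example (hypothetical input): entering "5,1,4,2,3" prints "1,2,3,4,5" - each pass
# peels off the current minimum and maximum, consuming the list from both ends.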
| 270
| 0
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase ( a__ , unittest.TestCase ):
_lowerCAmelCase = DDIMPipeline
_lowerCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
_lowerCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_lowerCAmelCase = False
def __magic_name__ ( self : Dict ):
torch.manual_seed(0 )
a_ = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ = DDIMScheduler()
a_ = {'''unet''': unet, '''scheduler''': scheduler}
return components
def __magic_name__ ( self : str , lowercase__ : Any , lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
a_ = torch.manual_seed(lowercase__ )
else:
a_ = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
a_ = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__ ( self : Optional[Any] ):
a_ = '''cpu'''
a_ = self.get_dummy_components()
a_ = self.pipeline_class(**lowercase__ )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
a_ = self.get_dummy_inputs(lowercase__ )
a_ = pipe(**lowercase__ ).images
a_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 3_2, 3_2, 3) )
a_ = np.array(
[1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
a_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase__ , 1e-3 )
def __magic_name__ ( self : Optional[int] ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __magic_name__ ( self : Tuple ):
super().test_save_load_local(expected_max_difference=3e-3 )
def __magic_name__ ( self : Dict ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __magic_name__ ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any] ):
a_ = '''google/ddpm-cifar10-32'''
a_ = UNetaDModel.from_pretrained(lowercase__ )
a_ = DDIMScheduler()
a_ = DDIMPipeline(unet=lowercase__ , scheduler=lowercase__ )
ddim.to(lowercase__ )
ddim.set_progress_bar_config(disable=lowercase__ )
a_ = torch.manual_seed(0 )
a_ = ddim(generator=lowercase__ , eta=0.0 , output_type='''numpy''' ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
a_ = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : str ):
a_ = '''google/ddpm-ema-bedroom-256'''
a_ = UNetaDModel.from_pretrained(lowercase__ )
a_ = DDIMScheduler.from_pretrained(lowercase__ )
a_ = DDIMPipeline(unet=lowercase__ , scheduler=lowercase__ )
ddpm.to(lowercase__ )
ddpm.set_progress_bar_config(disable=lowercase__ )
a_ = torch.manual_seed(0 )
a_ = ddpm(generator=lowercase__ , output_type='''numpy''' ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
a_ = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 143
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowercase ( a__ ):
def __magic_name__ ( self : List[str] ):
a_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase__ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowercase__ , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowercase__ , '''num_attention_heads''' ) )
class __lowercase :
def __init__( self : Dict , lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=1_3 , lowercase__ : List[Any]=3_2 , lowercase__ : List[str]=2 , lowercase__ : int=3 , lowercase__ : Optional[int]=6_4_0 , lowercase__ : Dict=4 , lowercase__ : Optional[int]="silu" , lowercase__ : Any=3 , lowercase__ : Optional[Any]=3_2 , lowercase__ : Optional[Any]=0.1 , lowercase__ : Tuple=0.1 , lowercase__ : Optional[Any]=0.1 , lowercase__ : Dict=0.02 , lowercase__ : Dict=True , lowercase__ : str=True , lowercase__ : Any=1_0 , lowercase__ : Union[str, Any]=None , ):
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = last_hidden_size
a_ = num_attention_heads
a_ = hidden_act
a_ = conv_kernel_size
a_ = output_stride
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = classifier_dropout_prob
a_ = use_labels
a_ = is_training
a_ = num_labels
a_ = initializer_range
a_ = scope
def __magic_name__ ( self : str ):
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels )
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Dict , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : str ):
a_ = MobileViTModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self : Optional[Any] , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Dict ):
a_ = self.num_labels
a_ = MobileViTForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Dict , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : List[str] , lowercase__ : List[str] ):
a_ = self.num_labels
a_ = MobileViTForSemanticSegmentation(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a_ = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self : int ):
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ = config_and_inputs
a_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( a__ , a__ , unittest.TestCase ):
_lowerCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __magic_name__ ( self : str ):
a_ = MobileViTModelTester(self )
a_ = MobileViTConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ )
def __magic_name__ ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def __magic_name__ ( self : Dict ):
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[Any] ):
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Tuple ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(lowercase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[Any] ):
pass
def __magic_name__ ( self : List[Any] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
def check_hidden_states_output(lowercase__ : List[str] , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ):
a_ = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
a_ = outputs.hidden_states
a_ = 5
self.assertEqual(len(lowercase__ ) , lowercase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
a_ = 2
for i in range(len(lowercase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
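            # each of the 5 feature maps halves the spatial resolution, so the divisor
            # ends at 64 and output_stride must equal 64 // 2 == 32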
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def __magic_name__ ( self : Any ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ )
@slow
def __magic_name__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = MobileViTModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Optional[Any] ):
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
a_ = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(lowercase__ )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
# verify the logits
a_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase__ )
a_ = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Any ):
a_ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = model.to(lowercase__ )
a_ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits
# verify the logits
a_ = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , lowercase__ )
a_ = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=lowercase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : List[str] ):
a_ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = model.to(lowercase__ )
a_ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
a_ = prepare_img()
a_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
a_ = model(**lowercase__ )
a_ = outputs.logits.detach().cpu()
a_ = image_processor.post_process_semantic_segmentation(outputs=lowercase__ , target_sizes=[(5_0, 6_0)] )
a_ = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , lowercase__ )
a_ = image_processor.post_process_semantic_segmentation(outputs=lowercase__ )
a_ = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , lowercase__ )
| 143
| 1
|
def min_path_sum(grid: list) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list , row_above: list ) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
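# Worked example (illustrative): for grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the rows
# accumulate to [1, 4, 5], [2, 7, 6] and [6, 8, 7], so min_path_sum returns 7.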
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( a_, a_, unittest.TestCase ):
__lowerCAmelCase = IFPipeline
__lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
__lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __magic_name__ ( self ):
return self._get_dummy_components()
def __magic_name__ ( self , _a , _a=0 ):
if str(_a ).startswith("mps" ):
lowercase : List[str] = torch.manual_seed(_a )
else:
lowercase : Dict = torch.Generator(device=_a ).manual_seed(_a )
lowercase : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __magic_name__ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ ( self ):
self._test_save_load_local()
def __magic_name__ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __magic_name__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ):
# if
lowercase : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowercase : List[str] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=_a , tokenizer=_a )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowercase , lowercase : int = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase : List[str] = None
lowercase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
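        # both stages run under model CPU offload to bound peak VRAM; the memory
        # assertions in the helpers below rely on this setup being in place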
self._test_if(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
lowercase : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase : List[str] = IFInpaintingPipeline(**pipe_a.components )
lowercase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_a , _a , _a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Optional[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : int = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 361
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig(embedding_type='''hybrid''' )
    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 7_68
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [2_56, 5_12, 7_68, 7_68]
        config.num_labels = 1_50
        config.patch_size = 16
        expected_shape = (1, 3_84, 3_84)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 7_68
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 1_50
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]
    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
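# The backbone's original classification head is dropped here because the converted
# HF model defines its own head; popping with a default avoids a KeyError if absent.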
def rename_key(name ):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''' )
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''' )
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''' )
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''' )
    if "head" in name:
        name = name.replace('''head''' , '''head.head''' )
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''' )
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
    if "backbone" in name:
        name = name.replace('''backbone''' , '''backbone.bit.encoder''' )
    if ".." in name:
        name = name.replace('''..''' , '''.''' )
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('''convolution''' , '''conv''' )
    if "layer" in name and "backbone" in name:
        name = name.replace('''layer''' , '''layers''' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
    if "embedder.conv" in name:
        name = name.replace('''embedder.conv''' , '''embedder.convolution''' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
    return name
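# Illustrative end-to-end renames produced by the rules above (assumed examples):
#   "pretrained.model.blocks.0.attn.proj.weight" -> "dpt.encoder.layer.0.attention.output.dense.weight"
#   "scratch.refinenet4.out_conv.weight"         -> "neck.fusion_stage.layers.0.projection.weight"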
def read_in_q_k_v(state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
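# e.g. with hidden_size=768 a fused qkv weight of shape (2304, 768) is sliced into
# three (768, 768) blocks: query rows first, then key, then value.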
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 4_80 if "ade" in checkpoint_url else 3_84
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
        image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
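# Example invocation (script name, paths and flags are illustrative; the checkpoint
# must exist locally because the script calls torch.load on --checkpoint_url):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large \
#       --show_prediction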
| 705
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
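# Quick sketch (illustrative, not part of the original file): inspecting the
# dynamic-axes mapping for the default task using the two classes defined above.
def _demo_onnx_inputs():
    onnx_config = RobertaOnnxConfig(RobertaConfig())
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])
    return onnx_config.inputs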
| 62
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
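# Worked example (illustrative): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2),
# the property above multiplies the strides together, giving
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. one output frame per 320 raw audio samples
# (20 ms of audio at the usual 16 kHz sampling rate):
#
#   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320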
| 93
|
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__A = """sshleifer/bart-tiny-random"""
__A = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return AutoConfig.from_pretrained(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :List[str] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :List[Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase )
| 93
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
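# Layout sketch (illustrative): the two methods above produce the RoBERTa-style
# pattern `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a
# pair, with token type ids that are all zeros in both cases.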
| 721
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract mel-frequency spectral coefficient features from one waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
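# Minimal sketch (hypothetical helper, not part of the original class): the same
# mean/variance normalization as `_normalize_one`, with statistics computed over
# the first `input_length` (non-padded) frames only.
def _demo_cmvn(num_frames: int = 6, num_bins: int = 3, input_length: int = 4) -> np.ndarray:
    x = np.arange(num_frames * num_bins, dtype=np.float32).reshape(num_frames, num_bins)
    x = x - x[:input_length].mean(axis=0)  # zero mean over the valid frames
    x = x / x[:input_length].std(axis=0)   # unit variance over the valid frames
    return x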
| 328
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
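# Hedged usage sketch (model name and data are placeholders, not from the original
# file): audio goes through the feature extractor, text through the tokenizer, and
# the tokenized text comes back under the "labels" key.
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="HELLO WORLD")
#   batch["input_values"], batch["labels"]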
| 5
|
import itertools
import math
def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(f'''{solution() = }''')
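# Sanity check (illustrative): the generator yields 2, 3, 5, 7, 11, 13, ... so
#   list(itertools.islice(prime_generator(), 6)) == [2, 3, 5, 7, 11, 13]
# and therefore solution(6) == 13.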
| 55
| 0
|
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
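# Cross-check (illustrative): on Python >= 3.8 the same value is available as
# math.comb, e.g. combinations(52, 5) == math.comb(52, 5) == 2_598_960.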
| 83
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original VisualBERT weights to our VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
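# Example invocation (script name and paths are illustrative):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       ./vqa_fine_tuned.th ./visualbert-vqa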
| 83
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 78
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 247
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 371
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Dict = '''left'''
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: Any="<s>" , _SCREAMING_SNAKE_CASE: Dict="</s>" , _SCREAMING_SNAKE_CASE: List[str]="<unk>" , _SCREAMING_SNAKE_CASE: Tuple="<sep>" , _SCREAMING_SNAKE_CASE: Optional[int]="<pad>" , _SCREAMING_SNAKE_CASE: int="<cls>" , _SCREAMING_SNAKE_CASE: Optional[Any]="<mask>" , _SCREAMING_SNAKE_CASE: List[str]=["<eop>", "<eod>"] , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> None:
"""simple docstring"""
UpperCamelCase_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = 3
UpperCamelCase_ = do_lower_case
UpperCamelCase_ = remove_space
UpperCamelCase_ = keep_accents
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def lowercase ( self: List[Any] ) -> Dict:
"""simple docstring"""
return len(self.sp_model )
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: List[str] ) -> int:
"""simple docstring"""
if self.remove_space:
UpperCamelCase_ = " ".join(inputs.strip().split() )
else:
UpperCamelCase_ = inputs
UpperCamelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
UpperCamelCase_ = unicodedata.normalize("NFKD" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(_SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
UpperCamelCase_ = outputs.lower()
return outputs
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.preprocess_text(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = []
for piece in pieces:
if len(_SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
UpperCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_SCREAMING_SNAKE_CASE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCamelCase_ = cur_pieces[1:]
else:
UpperCamelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_SCREAMING_SNAKE_CASE )
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of sentencepiece tokens into a single string."""
        # SPIECE_UNDERLINE is assumed to be the "▁" word-boundary marker defined
        # near the top of this tokenizer module, as in other sentencepiece tokenizers.
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # XLNet-style layout: the sequence tokens come first, `cls` goes last
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]  # the trailing `cls` token gets its own segment id

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
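A quick standalone sketch of the sequence-pair layout the methods above produce (XLNet-style: `sep` after each segment, `cls` at the very end). The token ids below are made up for illustration; in the real tokenizer they come from `sep_token_id` and `cls_token_id`.

# hypothetical ids: sep=3, cls=4
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]
sep, cls = [3], [4]

pair = token_ids_0 + sep + token_ids_1 + sep + cls  # [10, 11, 12, 3, 20, 21, 3, 4]
mask = [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1, 1]
segments = len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + [2]

assert len(pair) == len(mask) == len(segments)
print(pair, mask, segments, sep="\n")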
| 371
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
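A minimal standalone sketch of the priority order these tests exercise; this illustrates the expected behaviour, not the library's actual implementation.

def pick_framework(user_choice=None, torch_ok=True, tf_ok=True):
    # An explicit user choice always wins.
    if user_choice is not None:
        return user_choice
    # Otherwise prefer PyTorch, fall back to TensorFlow, and error if neither exists.
    if torch_ok:
        return "pt"
    if tf_ok:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")

assert pick_framework("mock_framework") == "mock_framework"
assert pick_framework(torch_ok=False) == "tf"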
| 43
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Optional[int] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
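The slice-fingerprint assertion the test ends with, in isolation (the values here are made up for illustration):

import numpy as np

image_slice = np.array([0.0441, 0.0469, 0.0507])
expected_slice = np.array([0.0440, 0.0470, 0.0509])
# every element must stay within the absolute tolerance
assert np.abs(image_slice - expected_slice).max() < 1e-2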
| 188
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
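A minimal standalone sketch of the lazy-module trick these init files rely on: the module in sys.modules is swapped for a proxy that imports a submodule only when one of its attributes is first touched. This is a simplified illustration, not the actual _LazyModule implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value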
| 144
|
import functools
def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
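A quick usage check; this is the classic LeetCode 983 instance, for which 11 is the known optimum:

print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11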
| 144
| 1
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 495
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (= step/max_step) after which the complex number
    formed by this x-y pair diverges. Members of the Mandelbrot set do not diverge,
    so their distance is 1."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: the Mandelbrot set is black, everything else is white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that maps the relative distance onto the HSV spectrum."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
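Two spot checks of get_distance as a sanity sketch: the origin never diverges (distance 1.0), while a point far outside the set escapes on the first step (distance 0.0).

print(get_distance(0, 0, 50))  # 1.0 - the origin is in the Mandelbrot set
print(get_distance(1, 1, 50))  # 0.0 - 1+1j escapes immediately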
| 19
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
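A standalone illustration of why the conversion transposes "kernel" tensors: TF dense kernels are stored as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features). The sizes below are just illustrative.

import numpy as np

tf_kernel = np.zeros((768, 3072))   # TF convention: (in_features, out_features)
pt_weight = tf_kernel.transpose()   # PyTorch Linear.weight: (out_features, in_features)
assert pt_weight.shape == (3072, 768)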
| 151
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
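Quick self-check of the strength predicate (the literals below are just illustrative):

print(is_strong_password("Hi!2"))        # False - shorter than 8 characters
print(is_strong_password("Secure#123"))  # True - length, case mix, digit and symbol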
| 151
| 1
|
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using sqrt() (floating point)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search (integer arithmetic only)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
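Example usage of the binary-search variant:

print(perfect_square_binary_search(625))  # True  (25 * 25)
print(perfect_square_binary_search(626))  # False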
| 703
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
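The two inactivity windows above, checked in isolation with plain datetimes (a sketch; the real script reads these timestamps from the GitHub API):

from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=25)
created_at = datetime.utcnow() - timedelta(days=40)
needs_stale_ping = (datetime.utcnow() - updated_at).days > 23 and (datetime.utcnow() - created_at).days >= 30
print(needs_stale_ping)  # True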
| 75
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
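Instantiating the config with its defaults, as a quick usage sketch:

config = VivitConfig()
print(config.model_type, config.image_size, config.num_frames)  # vivit 224 32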
| 310
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
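A tiny sanity sketch of the sigmoid that drives the 0.5 decision boundary drawn above:

print(sigmoid_function(0.0))  # 0.5   - a score of 0 sits exactly on the boundary
print(sigmoid_function(4.0))  # ~0.982 - large positive scores saturate towards 1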
| 310
| 1
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
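Usage sketch; the specifiers below are illustrative, and each call passes silently only when the installed version satisfies every bound:

require_version("numpy>=1.17,<3.0")
require_version_core("tokenizers>=0.11")  # a failure here carries the core install hint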
| 102
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the encoding method: the latent distribution of the encoder."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder (VAE) with KL loss, with optional sliced and tiled inference."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
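The row blending used by the tiled paths, demonstrated on toy tensors (a sketch; shapes are (batch, channels, height, width)):

import torch

a = torch.ones(1, 1, 4, 4)   # tile above
b = torch.zeros(1, 1, 4, 4)  # current tile
blend_extent = 2
for y in range(blend_extent):
    # linear cross-fade over the overlapping rows
    b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
print(b[0, 0, :, 0])  # tensor([1.0000, 0.5000, 0.0000, 0.0000])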
| 102
| 1
|
from ..utils import DummyObject, requires_backends
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Tuple , *snake_case__ : Tuple , **snake_case__ : int ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : str , *snake_case__ : List[Any] , **snake_case__ : Any ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Dict ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Tuple , *snake_case__ : Any , **snake_case__ : int ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Dict , *snake_case__ : Dict , **snake_case__ : List[Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[str] , *snake_case__ : Tuple , **snake_case__ : Tuple ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : str , *snake_case__ : List[str] , **snake_case__ : Tuple ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : int , *snake_case__ : Dict , **snake_case__ : Dict ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[int] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[Any] , *snake_case__ : int , **snake_case__ : List[str] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Tuple , *snake_case__ : Optional[int] , **snake_case__ : Tuple ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[Any] , *snake_case__ : List[str] , **snake_case__ : Any ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[Any] , *snake_case__ : Dict , **snake_case__ : str ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[Any] , *snake_case__ : Any , **snake_case__ : List[str] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[Any] , *snake_case__ : Tuple , **snake_case__ : Tuple ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : int , *snake_case__ : int , **snake_case__ : List[str] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : int , *snake_case__ : Any , **snake_case__ : Dict ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[str] , *snake_case__ : str , **snake_case__ : Any ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Dict , *snake_case__ : Any , **snake_case__ : Tuple ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : str , *snake_case__ : str , **snake_case__ : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Any , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[Any] , *snake_case__ : Optional[Any] , **snake_case__ : str ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : int , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : str ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : str , *snake_case__ : Optional[int] , **snake_case__ : int ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : List[str] , *snake_case__ : List[str] , **snake_case__ : List[str] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[Any] , *snake_case__ : int , **snake_case__ : List[Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Optional[Any] , *snake_case__ : str , **snake_case__ : List[Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowercase_ (metaclass=__snake_case ):
lowerCAmelCase__ =["sentencepiece"]
def __init__( self : Dict , *snake_case__ : str , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
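All of the dummy classes above rely on the DummyObject metaclass imported at the top of the file. A minimal hypothetical sketch of that pattern (simplified, not the real transformers utility): touching any attribute of a dummy class raises a helpful "install X" error instead of an ImportError at module import time.

class DummyMeta(type):
    # hypothetical simplified stand-in for transformers' DummyObject
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the sentencepiece library: pip install sentencepiece")


class FakeTokenizer(metaclass=DummyMeta):
    _backends = ["sentencepiece"]


try:
    FakeTokenizer.from_pretrained  # any class attribute access triggers the error
except ImportError as e:
    print(e)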
| 360
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __snake_case ( _UpperCamelCase ) -> str:
_a = model.config
_a = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
_a = MBartConfig(
is_decoder=_UpperCamelCase , is_encoder_decoder=_UpperCamelCase , add_cross_attention=_UpperCamelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_UpperCamelCase , add_final_layer_norm=_UpperCamelCase , )
return encoder_config, decoder_config
def __snake_case ( _UpperCamelCase ) -> Dict:
if "encoder.model" in name:
_a = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_a = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_a = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_a = '''encoder.''' + name
if "attn.proj" in name:
_a = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_a = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_a = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_a = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_a = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_a = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_a = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_a = '''encoder.layernorm.bias'''
return name
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(_UpperCamelCase )
if "qkv" in key:
_a = key.split('''.''' )
_a = int(key_split[3] )
_a = int(key_split[5] )
_a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_a = val
return orig_state_dict
def __snake_case ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=False ) -> Optional[int]:
# load original model
_a = DonutModel.from_pretrained(_UpperCamelCase ).eval()
# load HuggingFace model
_a , _a = get_configs(_UpperCamelCase )
_a = DonutSwinModel(_UpperCamelCase )
_a = MBartForCausalLM(_UpperCamelCase )
_a = VisionEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
model.eval()
_a = original_model.state_dict()
_a = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
# verify results on scanned document
_a = load_dataset('''hf-internal-testing/example-documents''' )
_a = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_a = XLMRobertaTokenizerFast.from_pretrained(_UpperCamelCase , from_slow=_UpperCamelCase )
_a = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_a = DonutProcessor(_UpperCamelCase , _UpperCamelCase )
_a = processor(_UpperCamelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_a = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_a = '''When is the coffee break?'''
_a = task_prompt.replace('''{user_input}''' , _UpperCamelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_a = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_a = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_a = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_a = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_a = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_a = original_model.decoder.tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_a = original_model.encoder.model.patch_embed(_UpperCamelCase )
_a , _a = model.encoder.embeddings(_UpperCamelCase )
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 )
# verify encoder hidden states
_a = original_model.encoder(_UpperCamelCase )
_a = model.encoder(_UpperCamelCase ).last_hidden_state
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-2 )
# verify decoder hidden states
_a = original_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).logits
_a = model(_UpperCamelCase , decoder_input_ids=_UpperCamelCase ).logits
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
lowerCamelCase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
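# Illustrative programmatic use (an assumption, not part of the original script):
# the same conversion can be driven without argparse, for example
#
#   convert_donut_checkpoint(
#       model_name="naver-clova-ix/donut-base-finetuned-docvqa",
#       pytorch_dump_folder_path="./donut-docvqa-converted",  # hypothetical output dir
#       push_to_hub=False,
#   )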
| 487
| 0
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
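# Minimal usage sketch (not from the original module). The ciphertext below is
# "the quick brown fox" Caesar-shifted by 10 and is illustrative only; note the
# chi-squared heuristic can be unreliable on very short texts:
#
#   shift, chi_value, plaintext = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
#   print(shift, round(chi_value, 2), plaintext)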
| 226
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
def extract(*_a , **_a ):
class __magic_name__ :
def __init__( self ) -> List[str]:
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
return self
return Out()
return extract
    def test_safe_diffusion_ddim(self):
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.dummy_cond_unet
lowerCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , )
lowerCAmelCase_ = self.dummy_vae
lowerCAmelCase_ = self.dummy_text_encoder
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = "A painting of a squirrel eating a burger"
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 )
lowerCAmelCase_ = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_a , )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_pndm(self):
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.dummy_cond_unet
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=_a )
lowerCAmelCase_ = self.dummy_vae
lowerCAmelCase_ = self.dummy_text_encoder
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = "A painting of a squirrel eating a burger"
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 )
lowerCAmelCase_ = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(0 )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_a , )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker(self):
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_a )
assert isinstance(_a , _a )
assert isinstance(pipe.scheduler , _a )
assert pipe.safety_checker is None
lowerCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(_a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_fp16(self):
lowerCAmelCase_ = self.dummy_cond_unet
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=_a )
lowerCAmelCase_ = self.dummy_vae
lowerCAmelCase_ = self.dummy_text_encoder
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase_ = unet.half()
lowerCAmelCase_ = vae.half()
lowerCAmelCase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = "A painting of a squirrel eating a burger"
lowerCAmelCase_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_a )
lowerCAmelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase_ = 4003660346
lowerCAmelCase_ = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase_ = torch.manual_seed(_a )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
lowerCAmelCase_ = torch.manual_seed(_a )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safe_stable_diffusion(self):
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_a )
lowerCAmelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase_ = 2734971755
lowerCAmelCase_ = 7
lowerCAmelCase_ = torch.manual_seed(_a )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowerCAmelCase_ = torch.manual_seed(_a )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase_ = 1044355234
lowerCAmelCase_ = 12
lowerCAmelCase_ = torch.manual_seed(_a )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowerCAmelCase_ = torch.manual_seed(_a )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
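# For context, a hypothetical out-of-test invocation of the safe pipeline exercised
# above might look like the following; the prompt is illustrative and the sld_*
# parameters mirror the tests, this is not part of the test suite:
#
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("a photo of an astronaut", num_inference_steps=50,
#                sld_guidance_scale=2000, sld_warmup_steps=7).images[0]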
| 226
| 1
|
from scipy.stats import pearsonr
import datasets
a ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 652
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Rotate/warp the image via the affine transform that maps pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
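# Quick sanity sketch (an illustration, not from the original script): mapping
# three points onto themselves yields the identity affine transform, so the
# warped image should equal the input.
#
#   img = np.zeros((10, 10), dtype=np.uint8)
#   pts = np.array([[0, 0], [9, 0], [0, 9]], np.float32)
#   assert np.array_equal(get_rotation(img, pts, pts, 10, 10), img)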
if __name__ == "__main__":
# read original image
a =cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
a =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
a , a =gray_img.shape
# set different points to rotate image
a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
a =[
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
a =plt.figure(1)
a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 652
| 1
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
def __init__( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE) -> List[str]:
"""simple docstring"""
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
UpperCamelCase__ : Union[str, Any] =model
UpperCamelCase__ : List[str] =kwargs.get("model_save_dir" , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[Any] =kwargs.get("latest_model_name" , __SCREAMING_SNAKE_CASE)
def __call__( self , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] ={k: np.array(__SCREAMING_SNAKE_CASE) for k, v in kwargs.items()}
return self.model.run(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@staticmethod
def UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None) -> Optional[int]:
"""simple docstring"""
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
UpperCamelCase__ : Optional[Any] ="CPUExecutionProvider"
return ort.InferenceSession(__SCREAMING_SNAKE_CASE , providers=[provider] , sess_options=__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] =file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCamelCase__ : List[str] =self.model_save_dir.joinpath(self.latest_model_name)
UpperCamelCase__ : List[str] =Path(__SCREAMING_SNAKE_CASE).joinpath(__SCREAMING_SNAKE_CASE)
try:
shutil.copyfile(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCamelCase__ : List[str] =self.model_save_dir.joinpath(__SCREAMING_SNAKE_CASE)
if src_path.exists():
UpperCamelCase__ : int =Path(__SCREAMING_SNAKE_CASE).joinpath(__SCREAMING_SNAKE_CASE)
try:
shutil.copyfile(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
except shutil.SameFileError:
pass
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
if os.path.isfile(__SCREAMING_SNAKE_CASE):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''')
return
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
# saving model weights/files
self._save_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def UpperCAmelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : str =file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Optional[Any] =OnnxRuntimeModel.load_model(
os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) , provider=__SCREAMING_SNAKE_CASE , sess_options=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[int] =Path(__SCREAMING_SNAKE_CASE)
# load model from hub
else:
# download model
UpperCamelCase__ : List[Any] =hf_hub_download(
repo_id=__SCREAMING_SNAKE_CASE , filename=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : List[Any] =Path(__SCREAMING_SNAKE_CASE).parent
UpperCamelCase__ : List[str] =Path(__SCREAMING_SNAKE_CASE).name
UpperCamelCase__ : Tuple =OnnxRuntimeModel.load_model(__SCREAMING_SNAKE_CASE , provider=__SCREAMING_SNAKE_CASE , sess_options=__SCREAMING_SNAKE_CASE)
return cls(model=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def UpperCAmelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str =None
if len(str(__SCREAMING_SNAKE_CASE).split("@")) == 2:
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] =model_id.split("@")
return cls._from_pretrained(
model_id=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
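# Minimal usage sketch (the repo id is hypothetical; inputs must be numpy arrays
# keyed by the ONNX graph's input names):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model",
#                                            provider="CPUExecutionProvider")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))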
| 582
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
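# This script expects to run under a distributed launcher that sets RANK and
# WORLD_SIZE for each process; an illustrative (hypothetical) invocation:
#
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2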
| 582
| 1
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
lowerCAmelCase_ : List[str] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 673
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Release memory by setting the given objects to `None` and emptying device caches."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception):
    """Check whether `exception` is one of the known out-of-memory error types."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function`, halving its batch size on out-of-memory errors."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
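# Minimal usage sketch of the decorator above (the training function is hypothetical):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # raise a CUDA OOM RuntimeError while batch_size is too large
#
#   train()  # retries with 64, 32, ... until the body succeeds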
| 673
| 1
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the complete diamond
def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 329
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
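# Minimal usage sketch (illustrative, mirroring the class above):
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   assert queue.dequeue() == "a"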
| 329
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase = 1_0
lowerCamelCase = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
lowerCamelCase = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(UpperCamelCase_ ) ),
} , features=UpperCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=UpperCamelCase_ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt'
lowerCamelCase = FILE_CONTENT
with open(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
    import bz2
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
    with bz2.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
import gzip
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
with gzip.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
        with lz4.frame.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(UpperCamelCase_ , 'w' ) as archive:
archive.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
import tarfile
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(UpperCamelCase_ , 'w' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
import lzma
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
with lzma.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
import zipfile
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[str] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
with zstd.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.xml'
lowerCamelCase = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ) -> List[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase = datasets.Dataset.from_dict(UpperCamelCase_ )
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(UpperCamelCase_ ) ) as con:
lowerCamelCase = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(UpperCamelCase_ , 'w' , newline='' ) as f:
lowerCamelCase = csv.DictWriter(UpperCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(UpperCamelCase_ , 'w' , newline='' ) as f:
lowerCamelCase = csv.DictWriter(UpperCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
    import bz2
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(UpperCamelCase_ , 'rb' ) as f:
lowerCamelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
lowerCamelCase = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(UpperCamelCase_ , 'wb' ) as f:
lowerCamelCase = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ )
lowerCamelCase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase_ ) )] for k in DATA[0]} , schema=UpperCamelCase_ )
writer.write_table(UpperCamelCase_ )
writer.close()
return path
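# Sanity sketch (an illustration, not one of the fixtures): the file written
# above round-trips with pyarrow, e.g.
#
#   table = pq.read_table(path)
#   assert table.to_pydict() == {k: [row[k] for row in DATA] for k in DATA[0]}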
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCamelCase = {'data': DATA}
with open(UpperCamelCase_ , 'w' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCamelCase = {'data': DATA_DICT_OF_LISTS}
with open(UpperCamelCase_ , 'w' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(UpperCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(UpperCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(UpperCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(UpperCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
import gzip
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(UpperCamelCase_ , 'rb' ) as orig_file:
with gzip.open(UpperCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
import gzip
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(UpperCamelCase_ , 'rb' ) as orig_file:
with gzip.open(UpperCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('nested' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(UpperCamelCase_ , 'w' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(UpperCamelCase_ , 'w' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.join('nested' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = ['0', '1', '2', '3']
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(UpperCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase = ['0', '1', '2', '3']
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(UpperCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = ['0', '1', '2', '3']
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(UpperCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(UpperCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( ) -> List[str]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def a_ ( ) -> List[str]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 246
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(result))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
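# A minimal sanity check (illustrative, not part of the metric): for a single
# problem, the streaming product in `estimator` equals the closed form
# pass@k = 1 - C(n - c, k) / C(n, k).
def _sanity_check_estimator():
    from math import comb

    n, c, k = 10, 3, 5
    closed_form = 1.0 - comb(n - c, k) / comb(n, k)
    streamed = float(estimate_pass_at_k([n], [c], k)[0])
    assert abs(closed_form - streamed) < 1e-9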
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of the text encoder plus the projection head defined below."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
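# A minimal smoke-test sketch (not part of the original module); the tiny sizes
# are illustrative assumptions so that no pretrained weights are needed.
def _smoke_test():
    tiny_config = RobertaSeriesConfig(
        vocab_size=100,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
        max_position_embeddings=64,
        project_dim=16,
    )
    model = RobertaSeriesModelWithTransformation(tiny_config).eval()
    with torch.no_grad():
        out = model(input_ids=torch.randint(0, 100, (1, 8)))
    assert out.projection_state.shape == (1, 8, 16)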
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
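# A small numeric illustration (not part of the test) of why the ratio check above
# is preferred over an absolute difference: at the ~1e8 scale of MobileBERT
# activations, even a 1e-9 relative error yields an absolute gap of ~0.1.
def _demo_ratio_tolerance():
    expected = 1.0e8
    actual = expected * (1 + 1e-9)
    assert abs(expected - actual) > 0.05  # fails any small absolute tolerance
    assert 1 - TOLERANCE < expected / actual < 1 + TOLERANCE  # ratio check passes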
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
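# A minimal usage sketch (hypothetical, CUDA only): bracket an operation with the
# helper above, then read the peak allocation for that window.
def _demo_peak_memory():
    _start_torch_memory_measurement()
    x = torch.empty(1024, 1024, device="cuda")  # ~4 MB of float32
    return torch.cuda.max_memory_allocated()  # peak bytes since the reset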
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
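# A minimal check sketch (illustrative, not part of the module): instantiating the
# shim should emit the FutureWarning while still behaving as a regular
# DeformableDetrImageProcessor.
def _check_deprecation_warning():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = DeformableDetrFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)
    assert isinstance(extractor, DeformableDetrImageProcessor)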
'''Pure Python implementation of the Dutch national flag sort (three-way partition).'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence of 0s, 1s and 2s in place in a single pass.

    >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1])
    [0, 0, 1, 1, 2, 2]
    >>> dutch_national_flag_sort([])
    []
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] = input("Enter numbers separated by commas:\n").strip()
__lowerCAmelCase : int = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
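# A quick property check (illustrative, not part of the original script): the
# single-pass partition agrees with sorted() on random 0/1/2 sequences.
def _property_check(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        sample = [random.randint(0, 2) for _ in range(random.randint(0, 50))]
        assert dutch_national_flag_sort(list(sample)) == sorted(sample)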
'''Newton's forward interpolation using a finite-difference table.'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u (u - 1) (u - 2) ... (u - p + 1) used by the forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0.0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
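# A minimal non-interactive sketch (illustrative data, not part of the original
# script): build the forward-difference table for f(x) = 2**x sampled at
# x = 0, 1, 2, 3 and evaluate the same formula at x = 1.5 (true value ~2.828).
def _demo() -> float:
    x = [0, 1, 2, 3]
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = float(2 ** x[i])
    u = (1.5 - x[0]) / (x[1] - x[0])
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ  # 2.8125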
'''Fast tests for the DeepFloyd IF inpainting pipeline.'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
'''PABEE (patience-based early exit) heads on top of BERT.'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
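# A toy illustration (pure Python, not part of the model) of the patience rule in
# BertModelWithPabee.forward: inference stops once the per-layer prediction is
# unchanged for `patience` consecutive layers.
def _patience_stop_layer(per_layer_predictions, patience):
    patient_counter, previous = 0, None
    for layer, prediction in enumerate(per_layer_predictions):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return layer  # early exit: layers after this one are never run
    return len(per_layer_predictions) - 1  # ran all layers


# e.g. _patience_stop_layer([2, 0, 1, 1, 1, 1], patience=3) == 5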
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
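# A minimal illustration (torch only, not part of the collator) of the -100
# masking above: label positions where the tokenizer's attention mask is 0 are
# replaced so that the CTC loss ignores padding.
def _demo_label_masking():
    ids = torch.tensor([[5, 6, 7, 0]])
    attention_mask = torch.tensor([[1, 1, 1, 0]])
    return ids.masked_fill(attention_mask.ne(1), -100)  # tensor([[5, 6, 7, -100]])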
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def __lowerCAmelCase ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCAmelCase = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCAmelCase = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
__UpperCAmelCase = F'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(A_ : List[Any] ):
__UpperCAmelCase = re.sub(__lowerCAmelCase , "" , batch["sentence"] ).lower() + " "
return batch
__UpperCAmelCase = train_dataset.map(__lowerCAmelCase , remove_columns=["sentence"] )
__UpperCAmelCase = eval_dataset.map(__lowerCAmelCase , remove_columns=["sentence"] )
def extract_all_chars(A_ : Optional[int] ):
__UpperCAmelCase = " ".join(batch["text"] )
__UpperCAmelCase = list(set(__lowerCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCAmelCase = train_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=-1 , keep_in_memory=__lowerCAmelCase , remove_columns=train_dataset.column_names , )
__UpperCAmelCase = train_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=-1 , keep_in_memory=__lowerCAmelCase , remove_columns=eval_dataset.column_names , )
__UpperCAmelCase = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
__UpperCAmelCase = {v: k for k, v in enumerate(__lowerCAmelCase )}
__UpperCAmelCase = vocab_dict[" "]
del vocab_dict[" "]
__UpperCAmelCase = len(__lowerCAmelCase )
__UpperCAmelCase = len(__lowerCAmelCase )
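    # Note (an inference, since the assignments above are anonymized): in the original
    # recipe the word delimiter "|" takes over the id of the space entry, and [UNK] and
    # [PAD] are appended at the end of the vocabulary before it is dumped to vocab.json.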
with open("vocab.json" , "w" ) as vocab_file:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
__UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase )
__UpperCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
__UpperCAmelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCAmelCase = min(len(__lowerCAmelCase ) , data_args.max_train_samples )
__UpperCAmelCase = train_dataset.select(range(__lowerCAmelCase ) )
if data_args.max_val_samples is not None:
__UpperCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__UpperCAmelCase = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(A_ : Optional[int] ):
__UpperCAmelCase = torchaudio.load(batch["path"] )
__UpperCAmelCase = resampler(__lowerCAmelCase ).squeeze().numpy()
__UpperCAmelCase = 1_60_00
__UpperCAmelCase = batch["text"]
return batch
__UpperCAmelCase = train_dataset.map(
__lowerCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase = eval_dataset.map(
__lowerCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(A_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
__UpperCAmelCase = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(__lowerCAmelCase )
return batch
__UpperCAmelCase = train_dataset.map(
__lowerCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase = eval_dataset.map(
__lowerCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__UpperCAmelCase = datasets.load_metric("wer" )
def compute_metrics(A_ : List[Any] ):
__UpperCAmelCase = pred.predictions
__UpperCAmelCase = np.argmax(__lowerCAmelCase , axis=-1 )
__UpperCAmelCase = processor.tokenizer.pad_token_id
__UpperCAmelCase = processor.batch_decode(__lowerCAmelCase )
# we do not want to group tokens when computing the metrics
__UpperCAmelCase = processor.batch_decode(pred.label_ids , group_tokens=__lowerCAmelCase )
__UpperCAmelCase = wer_metric.compute(predictions=__lowerCAmelCase , references=__lowerCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCAmelCase = DataCollatorCTCWithPadding(processor=__lowerCAmelCase , padding=__lowerCAmelCase )
# Initialize our Trainer
__UpperCAmelCase = CTCTrainer(
model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase = model_args.model_name_or_path
else:
__UpperCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCAmelCase = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
__UpperCAmelCase = train_result.metrics
__UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase )
)
__UpperCAmelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) )
trainer.log_metrics("train" , __lowerCAmelCase )
trainer.save_metrics("train" , __lowerCAmelCase )
trainer.save_state()
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__lowerCAmelCase )
__UpperCAmelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) )
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
return results
if __name__ == "__main__":
main()
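# Example invocation (script name and flag values are placeholders, for illustration only):
#   python run_common_voice.py --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr --output_dir ./wav2vec2-ctc --do_train --do_eval
# Alternatively, point the script at a single .json file holding all arguments; the
# HfArgumentParser branch in main() parses it via parse_json_file.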
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
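# A sketch of what the lazy pattern above buys (attribute names for illustration only):
#   importing this module is cheap, since nothing heavy is imported yet;
#   accessing module.LayoutXLMProcessor triggers the real submodule import.
# Optional backends (sentencepiece, tokenizers) are probed once up front; when one is
# missing, its symbols are simply left out instead of failing at import time.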
| 286
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = object_name.split('''.''' )
__SCREAMING_SNAKE_CASE : str = 0
# First let's find the module where our object lives.
__SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ):
i += 1
if i < len(snake_case ):
__SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] )
if i >= len(snake_case ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.readlines()
# Now let's find the class / func in the code!
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(snake_case ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__SCREAMING_SNAKE_CASE : List[Any] = line_index
while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index]
return "".join(snake_case )
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = code.split('''\n''' )
__SCREAMING_SNAKE_CASE : Dict = 0
while idx < len(snake_case ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(snake_case ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0
if has_indent:
__SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}'''
__SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : List[str] = f.readlines()
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = 0
    # Not a for loop, because `lines` is going to change (if `overwrite=True`).
while line_index < len(snake_case ):
__SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups()
__SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case )
__SCREAMING_SNAKE_CASE : str = get_indent(snake_case )
__SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
__SCREAMING_SNAKE_CASE : Dict = theoretical_indent
__SCREAMING_SNAKE_CASE : Optional[int] = start_index
        # Loop to check the observed code; stop when the indentation diminishes or when we see an `# End copy` comment.
__SCREAMING_SNAKE_CASE : List[Any] = True
while line_index < len(snake_case ) and should_continue:
line_index += 1
if line_index >= len(snake_case ):
break
__SCREAMING_SNAKE_CASE : Any = lines[line_index]
__SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index]
__SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case )
# Remove any nested `Copied from` comments to avoid circular copies
__SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None]
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case )
# Before comparing, use the `replace_pattern` on the original code.
if len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups()
__SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case )
if option.strip() == "all-casing":
__SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code )
__SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
__SCREAMING_SNAKE_CASE : str = start_index + 1
if overwrite and len(snake_case ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case )
return diffs
def a__ ( snake_case = False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = []
for filename in all_files:
__SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
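# Typical usage (run from the repository root, as noted at the top of this file):
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place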
| 74
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowercase_ = None
try:
import msvcrt
except ImportError:
lowercase_ = None
try:
import fcntl
except ImportError:
lowercase_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowercase_ = OSError
# Data
# ------------------------------------------------
lowercase_ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
lowercase_ = """3.0.12"""
lowercase_ = None
def a__ ( ):
"""simple docstring"""
global _logger
__SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ )
return _logger
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file
return None
def __str__( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = lock
return None
def __enter__( self : Any ):
"""simple docstring"""
return self.lock
def __exit__( self : str , _A : Any , _A : int , _A : Any ):
"""simple docstring"""
self.lock.release()
return None
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A )
# The path to the lock file.
__SCREAMING_SNAKE_CASE : Tuple = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file descriptor is only not None if the object currently holds
        # the lock.
__SCREAMING_SNAKE_CASE : str = None
# The default timeout value.
__SCREAMING_SNAKE_CASE : Any = timeout
# We use this lock primarily for the lock counter.
__SCREAMING_SNAKE_CASE : int = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased, and
        # the lock is only released when this value reaches 0 again.
__SCREAMING_SNAKE_CASE : int = 0
return None
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return self._lock_file
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return self._timeout
@timeout.setter
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = float(_A )
return None
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return self._lock_file_fd is not None
def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ):
"""simple docstring"""
if timeout is None:
__SCREAMING_SNAKE_CASE : Optional[int] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__SCREAMING_SNAKE_CASE : Tuple = id(self )
__SCREAMING_SNAKE_CASE : Any = self._lock_file
__SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(_A )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase__ ( self : int , _A : List[str]=False ):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__SCREAMING_SNAKE_CASE : Optional[int] = id(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
__SCREAMING_SNAKE_CASE : int = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : int ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ):
"""simple docstring"""
self.release()
return None
def __del__( self : int ):
"""simple docstring"""
self.release(force=_A )
return None
def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = os.path.basename(_A )
if len(_A ) > max_length and max_length > 0:
__SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) )
__SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(_A , _A )
else:
return path
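# Reentrancy sketch for the counter-based locking above (illustrative):
#   lock = FileLock("x.lock")
#   with lock:       # counter 0 -> 1, OS-level lock acquired
#       with lock:   # counter 1 -> 2, no second OS call
#           ...
#       # counter 2 -> 1, still held
#   # counter 1 -> 0, OS-level lock released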
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(_A , timeout=_A , max_filename_length=_A )
__SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A )
except OSError:
pass
else:
try:
msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(_A )
else:
__SCREAMING_SNAKE_CASE : str = fd
return None
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self._lock_file_fd
__SCREAMING_SNAKE_CASE : int = None
msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 )
os.close(_A )
try:
os.remove(self._lock_file )
            # Another instance of the application probably acquired
            # the file lock in the meantime.
except OSError:
pass
return None
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax
super().__init__(_A , timeout=_A , max_filename_length=_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A )
try:
fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_A )
else:
__SCREAMING_SNAKE_CASE : int = fd
return None
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd
__SCREAMING_SNAKE_CASE : Any = None
fcntl.flock(_A , fcntl.LOCK_UN )
os.close(_A )
return None
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A )
except OSError:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = fd
return None
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
os.close(self._lock_file_fd )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowercase_ = None
if msvcrt:
lowercase_ = WindowsFileLock
elif fcntl:
lowercase_ = UnixFileLock
else:
lowercase_ = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
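# FileLock therefore resolves to the msvcrt-based lock on Windows and the fcntl-based
# lock on POSIX; the O_EXCL-based soft lock is the fallback when neither API exists,
# and it only guards against cooperating processes, hence the warning above.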
| 74
| 1
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase__( unittest.TestCase ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = 0
@slow
def __magic_name__ ( self ):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__UpperCAmelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__UpperCAmelCase ) , 0 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
# Check that tokenizer_type ≠ model_type
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def __magic_name__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__UpperCAmelCase , """vocab.txt""" ) )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""bert""" , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__UpperCAmelCase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__UpperCAmelCase , """merges.txt""" ) )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""gpt2""" , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__UpperCAmelCase , """vocab.txt""" ) )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""bert""" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__UpperCAmelCase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__UpperCAmelCase , """merges.txt""" ) )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""gpt2""" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
with pytest.raises(__UpperCAmelCase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __UpperCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__UpperCAmelCase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowercase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TOKENIZER_MAPPING.values()
__lowercase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__UpperCAmelCase )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__UpperCAmelCase ) , __UpperCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __UpperCAmelCase )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__UpperCAmelCase )
__lowercase = """Hello, world. How are you?"""
__lowercase = tokenizer.tokenize(__UpperCAmelCase )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowercase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__UpperCAmelCase )
__lowercase = tokenizer.tokenize(__UpperCAmelCase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = get_tokenizer_config("""bert-base-cased""" )
__lowercase = config.pop("""_commit_hash""" , __UpperCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__UpperCAmelCase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase = get_tokenizer_config(__UpperCAmelCase )
self.assertDictEqual(__UpperCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = get_tokenizer_config(__UpperCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def __magic_name__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
__lowercase = CustomTokenizer.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __UpperCAmelCase )
# Can register in two steps
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
            # We go through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = BertTokenizerFast.from_pretrained(__UpperCAmelCase )
bert_tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = CustomTokenizerFast.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self ):
"""simple docstring"""
with self.assertRaises(__UpperCAmelCase ):
__lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def __magic_name__ ( self ):
"""simple docstring"""
class lowerCamelCase__( snake_case_ ):
UpperCamelCase : Optional[Any] = False
class lowerCamelCase__( snake_case_ ):
UpperCamelCase : List[str] = NewTokenizer
UpperCamelCase : List[str] = False
try:
AutoConfig.register("""custom""" , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
            # If remote code is not set, the default is to use the local code.
__lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def __magic_name__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__UpperCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowercase = AutoTokenizer.from_pretrained("""bert-base""" )
def __magic_name__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__UpperCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowercase = AutoTokenizer.from_pretrained(__UpperCAmelCase , revision="""aaaaaa""" )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowercase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
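        # As this test reads, the caching contract is: a second from_pretrained issues
        # exactly one HEAD request to validate the cached files and performs no GET,
        # i.e. nothing is re-downloaded.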
| 339
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : List[Any] = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def lowercase__ ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__lowercase = k.replace(__UpperCamelCase , __UpperCamelCase )
if k.startswith("""encoder""" ):
__lowercase = k.replace(""".attn""" , """.self_attn""" )
__lowercase = k.replace("""norm1""" , """self_attn_layer_norm""" )
__lowercase = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__lowercase = k.replace("""norm1""" , """self_attn_layer_norm""" )
__lowercase = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__lowercase = k.replace("""norm3""" , """final_layer_norm""" )
return k
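# Example rename performed above (illustrative): an encoder key such as
#   "encoder.layers.0.attention.q_lin.weight"
# becomes
#   "encoder.layers.0.self_attn.q_proj.weight"
# via the PATTERNS table plus the encoder-specific norm renames.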
def lowercase__ ( __UpperCamelCase : str ):
'''simple docstring'''
__lowercase = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__lowercase = sd.pop(__UpperCamelCase )
__lowercase = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__lowercase = v
snake_case : int = ['START']
@torch.no_grad()
def lowercase__ ( __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Dict ):
'''simple docstring'''
__lowercase = torch.load(__UpperCamelCase , map_location="""cpu""" )
__lowercase = model["""model"""]
__lowercase = BlenderbotConfig.from_json_file(__UpperCamelCase )
__lowercase = BlenderbotForConditionalGeneration(__UpperCamelCase )
__lowercase = m.model.state_dict().keys()
__lowercase = []
__lowercase = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__lowercase = rename_state_dict_key(__UpperCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__lowercase = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__UpperCamelCase )
m.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
m.half()
m.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
snake_case : Dict = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
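# Example invocation (script name and paths are placeholders):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json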
| 339
| 1
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ : Tuple = True
except ImportError:
lowerCamelCase__ : Tuple = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ : Tuple = _get_torch_home()
except ImportError:
lowerCamelCase__ : int = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
lowerCamelCase__ : List[str] = os.path.join(torch_cache_home, 'transformers')
lowerCamelCase__ : Any = 'https://cdn.huggingface.co'
lowerCamelCase__ : Optional[Any] = 'https://s3.amazonaws.com/models.huggingface.co/bert'
lowerCamelCase__ : Union[str, Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
lowerCamelCase__ : Optional[int] = os.path.join(PATH, 'config.yaml')
lowerCamelCase__ : Any = os.path.join(PATH, 'attributes.txt')
lowerCamelCase__ : Tuple = os.path.join(PATH, 'objects.txt')
lowerCamelCase__ : Dict = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
lowerCamelCase__ : Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ : Optional[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ : List[str] = 'pytorch_model.bin'
lowerCamelCase__ : List[str] = 'config.yaml'
def UpperCAmelCase_ ( __UpperCAmelCase : str=OBJECTS , __UpperCAmelCase : List[str]=ATTRIBUTES ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = []
with open(__UpperCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
SCREAMING_SNAKE_CASE_ = []
with open(__UpperCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = OrderedDict()
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = pkl.load(__UpperCAmelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
SCREAMING_SNAKE_CASE_ = ckp.pop(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
SCREAMING_SNAKE_CASE_ = torch.tensor(__UpperCAmelCase )
else:
            assert isinstance(__UpperCAmelCase , torch.Tensor ), type(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = v
return r
class lowerCamelCase_ :
'''simple docstring'''
lowercase_ = {}
def __init__( self : int , _lowerCAmelCase : dict , _lowerCAmelCase : str = "root" , _lowerCAmelCase : str=0 ):
SCREAMING_SNAKE_CASE_ = name
SCREAMING_SNAKE_CASE_ = level
SCREAMING_SNAKE_CASE_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
SCREAMING_SNAKE_CASE_ = copy.deepcopy(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = copy.deepcopy(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = Config(_lowerCAmelCase , name=_lowerCAmelCase , level=level + 1 )
SCREAMING_SNAKE_CASE_ = v
setattr(self , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = d
def __repr__( self : Dict ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = key.split('.' )
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_ = self._pointer
if len(_lowerCAmelCase ) > 1:
for i, l in enumerate(_lowerCAmelCase ):
if hasattr(self , _lowerCAmelCase ) and isinstance(getattr(self , _lowerCAmelCase ) , _lowerCAmelCase ):
setattr(getattr(self , _lowerCAmelCase ) , '.'.join(levels[i:] ) , _lowerCAmelCase )
if l == last_level:
SCREAMING_SNAKE_CASE_ = val
else:
SCREAMING_SNAKE_CASE_ = pointer[l]
def lowerCAmelCase_ ( self : int ):
return self._pointer
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
with open(F"{file_name}" , 'w' ) as stream:
dump(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
with open(F"{file_name}" , 'w' ) as stream:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def lowerCAmelCase_ ( _lowerCAmelCase : str ):
with open(_lowerCAmelCase ) as stream:
SCREAMING_SNAKE_CASE_ = load(_lowerCAmelCase , Loader=_lowerCAmelCase )
return data
def __str__( self : Tuple ):
SCREAMING_SNAKE_CASE_ = ' '
if self._name != "root":
SCREAMING_SNAKE_CASE_ = F"{t * (self._level-1)}{self._name}:\n"
else:
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
r += F"{t * (self._level)}{v}\n"
self._level += 1
else:
r += F"{t * (self._level)}{k}: {v} ({type(_lowerCAmelCase ).__name__})\n"
SCREAMING_SNAKE_CASE_ = level
return r[:-1]
@classmethod
def lowerCAmelCase_ ( cls : Tuple , _lowerCAmelCase : str , **_lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
return cls(_lowerCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : str , _lowerCAmelCase : str , **_lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = kwargs.pop('cache_dir' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('force_download' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('resume_download' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('proxies' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = kwargs.pop('local_files_only' , _lowerCAmelCase )
if os.path.isdir(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
elif os.path.isfile(_lowerCAmelCase ) or is_remote_url(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = pretrained_model_name_or_path
else:
SCREAMING_SNAKE_CASE_ = hf_bucket_url(_lowerCAmelCase , filename=_lowerCAmelCase , use_cdn=_lowerCAmelCase )
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ = cached_path(
_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , proxies=_lowerCAmelCase , resume_download=_lowerCAmelCase , local_files_only=_lowerCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
SCREAMING_SNAKE_CASE_ = Config.load_yaml(_lowerCAmelCase )
except EnvironmentError:
SCREAMING_SNAKE_CASE_ = 'Can\'t load config for'
raise EnvironmentError(_lowerCAmelCase )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_lowerCAmelCase ), kwargs
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = torch.load('dump.pt' , map_location=in_tensor.device )
SCREAMING_SNAKE_CASE_ = in_tensor.numpy()
SCREAMING_SNAKE_CASE_ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__UpperCAmelCase , __UpperCAmelCase , rtol=0.0_1 , atol=0.1 ), (
f"{sum([1 for x in np.isclose(__UpperCAmelCase , __UpperCAmelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ = urlparse(__UpperCAmelCase )
return parsed.scheme in ("http", "https")
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Tuple=True ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
SCREAMING_SNAKE_CASE_ = '/' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : int=0 , __UpperCAmelCase : Tuple=None , ) -> Tuple:
SCREAMING_SNAKE_CASE_ = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
ua += "; " + "; ".join('{}/{}'.format(__UpperCAmelCase , __UpperCAmelCase ) for k, v in user_agent.items() )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
ua += "; " + user_agent
SCREAMING_SNAKE_CASE_ = {'user-agent': ua}
if resume_size > 0:
SCREAMING_SNAKE_CASE_ = 'bytes=%d-' % (resume_size,)
SCREAMING_SNAKE_CASE_ = requests.get(__UpperCAmelCase , stream=__UpperCAmelCase , proxies=__UpperCAmelCase , headers=__UpperCAmelCase )
if response.status_code == 4_16: # Range not satisfiable
return
SCREAMING_SNAKE_CASE_ = response.headers.get('Content-Length' )
SCREAMING_SNAKE_CASE_ = resume_size + int(__UpperCAmelCase ) if content_length is not None else None
SCREAMING_SNAKE_CASE_ = tqdm(
unit='B' , unit_scale=__UpperCAmelCase , total=__UpperCAmelCase , initial=__UpperCAmelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__UpperCAmelCase ) )
temp_file.write(__UpperCAmelCase )
progress.close()
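# Resumable downloads above use the HTTP Range header: when resume_size > 0 the request
# asks for "bytes=<resume_size>-", and a 416 (range not satisfiable) response means the
# file is already complete, so the function returns without writing anything.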
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : int=10 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : int=None , __UpperCAmelCase : Any=False , ) -> List[str]:
if cache_dir is None:
SCREAMING_SNAKE_CASE_ = TRANSFORMERS_CACHE
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = str(__UpperCAmelCase )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = None
if not local_files_only:
try:
SCREAMING_SNAKE_CASE_ = requests.head(__UpperCAmelCase , allow_redirects=__UpperCAmelCase , proxies=__UpperCAmelCase , timeout=__UpperCAmelCase )
if response.status_code == 2_00:
SCREAMING_SNAKE_CASE_ = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
SCREAMING_SNAKE_CASE_ = url_to_filename(__UpperCAmelCase , __UpperCAmelCase )
# get cache path to put the file
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
    # etag is None means we have no connection, or the url doesn't exist, or it is
    # otherwise inaccessible; try to fall back on the last downloaded file.
if etag is None:
if os.path.exists(__UpperCAmelCase ):
return cache_path
else:
SCREAMING_SNAKE_CASE_ = [
file
for file in fnmatch.filter(os.listdir(__UpperCAmelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(__UpperCAmelCase ) > 0:
return os.path.join(__UpperCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(__UpperCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
SCREAMING_SNAKE_CASE_ = cache_path + '.lock'
with FileLock(__UpperCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__UpperCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
SCREAMING_SNAKE_CASE_ = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(__UpperCAmelCase , 'a+b' ) as f:
yield f
SCREAMING_SNAKE_CASE_ = _resumable_file_manager
if os.path.exists(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = os.stat(__UpperCAmelCase ).st_size
else:
SCREAMING_SNAKE_CASE_ = 0
else:
SCREAMING_SNAKE_CASE_ = partial(tempfile.NamedTemporaryFile , dir=__UpperCAmelCase , delete=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
        print(
            '%s not found in cache or force_download set to True, downloading to %s' % (__UpperCAmelCase , temp_file.name) )
http_get(
__UpperCAmelCase , __UpperCAmelCase , proxies=__UpperCAmelCase , resume_size=__UpperCAmelCase , user_agent=__UpperCAmelCase , )
os.replace(temp_file.name , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = {'url': url, 'etag': etag}
SCREAMING_SNAKE_CASE_ = cache_path + '.json'
with open(__UpperCAmelCase , 'w' ) as meta_file:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return cache_path
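# The caching path above combines three safeguards worth noting: a `.lock`
# file so concurrent processes never download the same URL twice, a temporary
# file so an interrupted download cannot corrupt the cache, and an atomic
# `os.replace` into the final location. A condensed standalone sketch
# (assumes the `filelock` package; names are illustrative):
import os
import tempfile
from filelock import FileLock

def demo_atomic_download(fetch, cache_path):
    with FileLock(cache_path + '.lock'):
        if os.path.exists(cache_path):
            return cache_path  # another process finished first
        with tempfile.NamedTemporaryFile(dir=os.path.dirname(cache_path) or '.', delete=False) as tmp:
            fetch(tmp)  # any callable that writes the payload into the open file
        os.replace(tmp.name, cache_path)  # atomic on the same filesystem
    return cache_path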
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=None ) -> List[str]:
SCREAMING_SNAKE_CASE_ = url.encode('utf-8' )
SCREAMING_SNAKE_CASE_ = shaaaa(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = url_hash.hexdigest()
if etag:
SCREAMING_SNAKE_CASE_ = etag.encode('utf-8' )
SCREAMING_SNAKE_CASE_ = shaaaa(__UpperCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
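# Standalone sketch of the cache-filename scheme above (the obfuscated
# `shaaaa` appears to correspond to `hashlib.sha256` under this file's
# renaming convention): the filename is the sha256 of the URL, with the
# sha256 of the ETag appended when one is known.
import hashlib

def demo_url_to_filename(url, etag=None):
    filename = hashlib.sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename += '.' + hashlib.sha256(etag.encode('utf-8')).hexdigest()
    if url.endswith('.h5'):
        filename += '.h5'  # keep the extension that HDF5 checkpoint loaders rely on
    return filename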
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Any=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : Any=False , __UpperCAmelCase : Any=False , ) -> int:
if cache_dir is None:
SCREAMING_SNAKE_CASE_ = TRANSFORMERS_CACHE
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = str(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = str(__UpperCAmelCase )
if is_remote_url(__UpperCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
SCREAMING_SNAKE_CASE_ = get_from_cache(
__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , user_agent=__UpperCAmelCase , local_files_only=__UpperCAmelCase , )
elif os.path.exists(__UpperCAmelCase ):
# File, and it exists.
SCREAMING_SNAKE_CASE_ = url_or_filename
elif urlparse(__UpperCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(__UpperCAmelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(__UpperCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(__UpperCAmelCase ) and not tarfile.is_tarfile(__UpperCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
SCREAMING_SNAKE_CASE_ = os.path.split(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = output_file.replace('.' , '-' ) + '-extracted'
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if os.path.isdir(__UpperCAmelCase ) and os.listdir(__UpperCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
SCREAMING_SNAKE_CASE_ = output_path + '.lock'
with FileLock(__UpperCAmelCase ):
shutil.rmtree(__UpperCAmelCase , ignore_errors=__UpperCAmelCase )
os.makedirs(__UpperCAmelCase )
if is_zipfile(__UpperCAmelCase ):
with ZipFile(__UpperCAmelCase , 'r' ) as zip_file:
zip_file.extractall(__UpperCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = tarfile.open(__UpperCAmelCase )
tar_file.extractall(__UpperCAmelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(__UpperCAmelCase ) )
return output_path_extracted
return output_path
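# Standalone sketch of the archive-extraction dispatch above: zip and tar
# archives are detected by content rather than extension, and anything else
# is rejected (names illustrative):
import tarfile
from zipfile import ZipFile, is_zipfile

def demo_extract(archive_path, out_dir):
    if is_zipfile(archive_path):
        with ZipFile(archive_path, 'r') as zf:
            zf.extractall(out_dir)
    elif tarfile.is_tarfile(archive_path):
        with tarfile.open(archive_path) as tf:
            tf.extractall(out_dir)
    else:
        raise EnvironmentError('Archive format of {} could not be identified'.format(archive_path))
    return out_dir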
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int="," ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
if os.path.isfile(__UpperCAmelCase ):
with open(__UpperCAmelCase ) as f:
SCREAMING_SNAKE_CASE_ = eval(f.read() )
else:
SCREAMING_SNAKE_CASE_ = requests.get(__UpperCAmelCase )
try:
            SCREAMING_SNAKE_CASE_ = req.json()
except Exception:
SCREAMING_SNAKE_CASE_ = req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE_ = eval(__UpperCAmelCase )
except Exception:
SCREAMING_SNAKE_CASE_ = data.split('\n' )
req.close()
return data
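# Note that the helper above `eval()`s both local files and remote payloads,
# which executes arbitrary code from untrusted input. A safer standalone
# sketch for the common JSON case (illustrative only):
import json
import os
import requests

def demo_get_json(query):
    if os.path.isfile(query):
        with open(query) as f:
            return json.load(f)
    response = requests.get(query, timeout=10)
    response.raise_for_status()
    return response.json()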
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = requests.get(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__UpperCAmelCase )
with open(__UpperCAmelCase , 'rb' ) as stream:
SCREAMING_SNAKE_CASE_ = pkl.load(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = weights.pop('model' )
SCREAMING_SNAKE_CASE_ = {}
for k, v in model.items():
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__UpperCAmelCase )
if "running_var" in k:
SCREAMING_SNAKE_CASE_ = torch.tensor([0] )
SCREAMING_SNAKE_CASE_ = k.replace('running_var' , 'num_batches_tracked' )
SCREAMING_SNAKE_CASE_ = zero
return new
def UpperCAmelCase_ ( ) -> Union[str, Any]:
    print(f"{os.path.abspath(os.path.join(os.path.dirname(__file__ ) , os.pardir ) )}/demo.ipynb" )  # assumes the elided PATH global pointed at this module's directory
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int]="RGB" ) -> Union[str, Any]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
if os.path.isfile(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = cva.imread(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = get_image_from_url(__UpperCAmelCase )
assert img is not None, f"could not connect to: {im}"
SCREAMING_SNAKE_CASE_ = cva.cvtColor(__UpperCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
SCREAMING_SNAKE_CASE_ = img[:, :, ::-1]
return img
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int=1 ) -> str:
return (images[i : i + batch] for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ))
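# Quick illustration of the batching generator defined above; a standalone
# equivalent with explicit names:
def demo_chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))

# list(demo_chunk(list(range(5)), batch=2)) == [[0, 1], [2, 3], [4]]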
| 31
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowercase ( snake_case_ ):
lowercase = 'convbert'
def __init__( self : Union[str, Any] , snake_case : Tuple=3_0_5_2_2 , snake_case : List[Any]=7_6_8 , snake_case : Any=1_2 , snake_case : Optional[int]=1_2 , snake_case : Optional[int]=3_0_7_2 , snake_case : Tuple="gelu" , snake_case : Any=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[Any]=5_1_2 , snake_case : str=2 , snake_case : Tuple=0.02 , snake_case : Any=1e-12 , snake_case : List[str]=1 , snake_case : Any=0 , snake_case : Tuple=2 , snake_case : Any=7_6_8 , snake_case : Any=2 , snake_case : Tuple=9 , snake_case : int=1 , snake_case : str=None , **snake_case : Dict , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case , )
UpperCamelCase_ : List[Any] = vocab_size
UpperCamelCase_ : Any = hidden_size
UpperCamelCase_ : int = num_hidden_layers
UpperCamelCase_ : Any = num_attention_heads
UpperCamelCase_ : List[Any] = intermediate_size
UpperCamelCase_ : str = hidden_act
UpperCamelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCamelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_ : Dict = max_position_embeddings
UpperCamelCase_ : Dict = type_vocab_size
UpperCamelCase_ : str = initializer_range
UpperCamelCase_ : Any = layer_norm_eps
UpperCamelCase_ : Union[str, Any] = embedding_size
UpperCamelCase_ : int = head_ratio
UpperCamelCase_ : Optional[Any] = conv_kernel_size
UpperCamelCase_ : Any = num_groups
UpperCamelCase_ : int = classifier_dropout
class _lowercase ( snake_case_ ):
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase_ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase_ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
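# The ONNX config above maps each input tensor to its dynamic axes so the
# exported graph accepts variable batch, sequence, and (for multiple choice)
# choice sizes. A standalone sketch of that mapping (no transformers import
# needed; names illustrative):
from collections import OrderedDict

def demo_onnx_inputs(task="default"):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )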
| 417
| 0
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : Union[str, Any] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
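# Sketch of the lazy-import pattern used above: at import time the package
# swaps itself in sys.modules for a proxy module, and heavy submodules are
# only imported on first attribute access. A minimal illustrative version,
# not the real _LazyModule:
import importlib
import types

class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")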
| 616
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase :
def __init__( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int=2 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Dict=1_0 , __UpperCAmelCase : str=3 , __UpperCAmelCase : List[str]=3_2 * 8 , __UpperCAmelCase : List[Any]=3_2 * 8 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : Tuple=6_4 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ = num_queries
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_size
SCREAMING_SNAKE_CASE__ = max_size
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = hidden_dim
SCREAMING_SNAKE_CASE__ = hidden_dim
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ = (torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE__ = self.num_queries
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE__ = self.num_channels
SCREAMING_SNAKE_CASE__ = 6_4
SCREAMING_SNAKE_CASE__ = 1_2_8
SCREAMING_SNAKE_CASE__ = self.hidden_dim
SCREAMING_SNAKE_CASE__ = self.hidden_dim
SCREAMING_SNAKE_CASE__ = self.hidden_dim
return config
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) , config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=False ) -> Tuple:
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = MaskaFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
def comm_check_on_output(__UpperCAmelCase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(
pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase__ : Any = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : str = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__UpperCAmelCase )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE__ = MaskaFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=__UpperCAmelCase ),
"""class_labels""": torch.zeros(2 , 1_0 , device=__UpperCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE__ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation(__UpperCAmelCase ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A_ : Optional[Any] = 1E-4
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowerCamelCase (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE__ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
SCREAMING_SNAKE_CASE__ = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ = inputs["""pixel_values"""].to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]]
SCREAMING_SNAKE_CASE__ = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
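# The integration tests above all follow the same golden-value pattern: run
# the model under torch.no_grad(), slice a few activations, and compare them
# against hard-coded reference numbers within an absolute tolerance. A
# condensed standalone sketch of that check (names illustrative):
import torch

def demo_golden_check(model, inputs, expected_slice, atol=1e-4):
    with torch.no_grad():
        outputs = model(**inputs)
    actual_slice = outputs.encoder_last_hidden_state[0, 0, :3, :3]
    return torch.allclose(actual_slice, expected_slice.to(actual_slice.device), atol=atol)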
| 616
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a ( unittest.TestCase ):
@slow
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : str = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCamelCase__ : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ : int = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE )["last_hidden_state"].detach()
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
@slow
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : int = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCamelCase__ : str = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ : Any = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ : List[Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ : Any = model(SCREAMING_SNAKE_CASE )["last_hidden_state"].detach()
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 228
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __a ( A__ ):
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "num_attention_heads" ) )
class __a :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[Any]=64 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : List[Any]=16 , SCREAMING_SNAKE_CASE : Tuple=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE : Tuple=[4, 6, 8] , SCREAMING_SNAKE_CASE : Dict=[2, 3, 4] , SCREAMING_SNAKE_CASE : Any=[16, 16, 16] , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE : int=[2, 2, 2] , SCREAMING_SNAKE_CASE : str=0.0_2 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : int=2 , ):
'''simple docstring'''
UpperCamelCase__ : Dict = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : str = image_size
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : str = kernel_size
UpperCamelCase__ : str = stride
UpperCamelCase__ : int = padding
UpperCamelCase__ : int = hidden_sizes
UpperCamelCase__ : Dict = num_attention_heads
UpperCamelCase__ : int = depths
UpperCamelCase__ : Optional[Any] = key_dim
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : List[str] = patch_size
UpperCamelCase__ : str = attention_ratio
UpperCamelCase__ : int = mlp_ratio
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Union[str, Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ : str = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : List[str] = num_labels
UpperCamelCase__ : int = initializer_range
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = LevitModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.num_labels
UpperCamelCase__ : Optional[Any] = LevitForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = config_and_inputs
UpperCamelCase__ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __a ( A__ , A__ , unittest.TestCase ):
_lowerCAmelCase : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_lowerCAmelCase : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = False
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = LevitModelTester(self )
UpperCamelCase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase__ : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : int = outputs.hidden_states
UpperCamelCase__ : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ : Union[str, Any] = False
UpperCamelCase__ : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
UpperCamelCase__ : Optional[int] = problem_type["title"]
UpperCamelCase__ : Tuple = problem_type["num_labels"]
UpperCamelCase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
UpperCamelCase__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCamelCase__ : Tuple = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as warning_list:
UpperCamelCase__ : Any = model(**SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] = LevitModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
UpperCamelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
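# The spatial bookkeeping in the tests above repeatedly applies the standard
# convolution output-size formula, floor((n + 2*padding - kernel) / stride) + 1,
# once per patch-embedding stage. A tiny standalone helper (illustrative):
from math import floor

def demo_conv_out(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride) + 1

# demo_conv_out(64, kernel=3, stride=2, padding=1) == 32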
| 228
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
snake_case : Union[str, Any] = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
snake_case : Optional[Any] = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
snake_case : List[str] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def _lowercase ( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def _lowercase ( self : Any , _A : Optional[Any] , _A : Any , _A : Any=None):
return {
"matthews_correlation": float(matthews_corrcoef(_A , _A , sample_weight=_A)),
}
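# The metric class above is a thin wrapper around scikit-learn; the same
# number can be computed directly (inputs taken from the docstring example):
from sklearn.metrics import matthews_corrcoef

# round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2) == 0.54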
| 182
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _lowercase ( self : List[Any]):
A__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=_A).to(_A)
A__ : Any = AutoTokenizer.from_pretrained("google/mt5-small")
A__ : int = tokenizer("Hello there" , return_tensors="pt").input_ids
A__ : List[str] = tokenizer("Hi I am" , return_tensors="pt").input_ids
A__ : int = model(input_ids.to(_A) , labels=labels.to(_A)).loss
A__ : Optional[int] = -(labels.shape[-1] * loss.item())
A__ : List[str] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 182
| 1
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( *_snake_case : int , **_snake_case : Any ) -> Optional[int]:
'''simple docstring'''
pass
def _lowerCamelCase ( UpperCAmelCase__ ) -> str:
'''simple docstring'''
a__ = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
a_ : Any =MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _lowerCAmelCase ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Dict ) -> List[str]:
'''simple docstring'''
a__ = DepthEstimationPipeline(model=_snake_case , image_processor=_snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _lowerCAmelCase ( self : List[str] , _snake_case : Tuple , _snake_case : int ) -> Tuple:
'''simple docstring'''
a__ = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , _snake_case )
import datasets
a__ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
a__ = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , _snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass
@slow
@require_torch
def _lowerCAmelCase ( self : Any ) -> List[str]:
'''simple docstring'''
a__ = 'Intel/dpt-large'
a__ = pipeline('depth-estimation' , model=_snake_case )
a__ = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
a__ = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
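# A minimal usage sketch of the pipeline exercised above; the checkpoint is
# the one from the slow test, and running this needs network access plus the
# model weights:
def demo_depth_pipeline():
    from transformers import pipeline  # deferred so importing this file stays cheap
    depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
    result = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
    return result['depth'], result['predicted_depth']  # PIL visualisation, raw torch.Tensor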
| 232
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( a ):
"""simple docstring"""
a_ : Tuple ="mra"
def __init__( self : Union[str, Any] , _snake_case : List[str]=5_0265 , _snake_case : Union[str, Any]=768 , _snake_case : Union[str, Any]=12 , _snake_case : Any=12 , _snake_case : str=3072 , _snake_case : int="gelu" , _snake_case : Tuple=0.1 , _snake_case : int=0.1 , _snake_case : Tuple=512 , _snake_case : Optional[Any]=1 , _snake_case : Union[str, Any]=0.02 , _snake_case : List[Any]=1E-5 , _snake_case : Optional[Any]="absolute" , _snake_case : List[Any]=4 , _snake_case : str="full" , _snake_case : Union[str, Any]=0 , _snake_case : Any=0 , _snake_case : str=1 , _snake_case : Union[str, Any]=0 , _snake_case : Optional[Any]=2 , **_snake_case : List[Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = initializer_range
a__ = type_vocab_size
a__ = layer_norm_eps
a__ = position_embedding_type
a__ = block_per_row
a__ = approx_mode
a__ = initial_prior_first_n_blocks
a__ = initial_prior_diagonal_n_blocks
| 232
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
@slow
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :List[str] = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
__snake_case :Optional[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
__snake_case :List[str] = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
__snake_case :Union[str, Any] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
__snake_case :Tuple = shift_tokens_right(a__ , model.config.pad_token_id , model.config.decoder_start_token_id )
__snake_case :str = model(a__ , decoder_input_ids=a__ ).logits
__snake_case :int = optax.softmax_cross_entropy(a__ , onehot(a__ , logits.shape[-1] ) ).mean()
__snake_case :List[Any] = -(labels.shape[-1] * loss.item())
__snake_case :List[Any] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
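# Standalone sketch of the decoder-input shift used above: labels move one
# position to the right, the decoder start token is prepended, and any -100
# ignore-index entries are mapped back to the pad token (numpy version,
# illustrative only):
import numpy as np

def demo_shift_tokens_right(labels, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)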
| 291
|
from __future__ import annotations
lowerCamelCase__ = list[list[int]]
# assigning initial values to the grid
lowerCamelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowerCamelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def UpperCamelCase ( snake_case__ : Matrix ,snake_case__ : int ,snake_case__ : int ,snake_case__ : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
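# Worked example of the box arithmetic above: for row=4, column=7 the expressions
# row - row % 3 = 3 and column - column % 3 = 6 give the top-left corner of the
# enclosing 3x3 box, so the two loops scan rows 3-5 and columns 6-8.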
def UpperCamelCase ( snake_case__ : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def UpperCamelCase ( snake_case__ : Matrix ):
'''simple docstring'''
if location := find_empty_location(snake_case__ ):
__snake_case , __snake_case :Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
__snake_case :Union[str, Any] = digit
if sudoku(snake_case__ ) is not None:
return grid
__snake_case :Tuple = 0
return None
def UpperCamelCase ( snake_case__ : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(snake_case__ ,end=""" """ )
print()
if __name__ == "__main__":
    # ``sudoku`` solves each grid in place, so the unsolved puzzle is printed first
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
lowerCamelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 291
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
UpperCAmelCase_ =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
UpperCAmelCase_ =tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
UpperCAmelCase_ =tf_top_k_top_p_filtering(_lowerCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
UpperCAmelCase_ =output[output != -float("inf" )]
UpperCAmelCase_ =tf.cast(
tf.where(tf.not_equal(_lowerCAmelCase , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_lowerCAmelCase , _lowerCAmelCase , rtol=1e-12 )
tf.debugging.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
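# Minimal sketch of the call under test (assumptions: TF2 eager mode; the helper is
# importable as shown at the top of this file):
#
#     logits = tf.random.normal((1, 30))
#     filtered = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
#     # positions outside the kept top-k/top-p set are replaced with -inf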
@require_tf
class A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_snake_case ={
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCAmelCase__ ( self: Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ =2
UpperCAmelCase_ =2
class A ( tf.Module ):
def __init__( self: Union[str, Any] , _lowerCAmelCase: Any ) -> Optional[int]:
'''simple docstring'''
super(_lowerCAmelCase , self ).__init__()
UpperCAmelCase_ =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model.generate(
input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase , max_new_tokens=_lowerCAmelCase , return_dict_in_generate=_lowerCAmelCase , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ =[[2, 0], [102, 103]]
UpperCAmelCase_ =[[1, 0], [1, 1]]
UpperCAmelCase_ =DummyModel(model=_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCAmelCase , _lowerCAmelCase , signatures={"serving_default": dummy_model.serving} )
UpperCAmelCase_ =tf.saved_model.load(_lowerCAmelCase ).signatures["serving_default"]
for batch_size in range(1 , len(_lowerCAmelCase ) + 1 ):
UpperCAmelCase_ ={
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
UpperCAmelCase_ =serving_func(**_lowerCAmelCase )["sequences"]
UpperCAmelCase_ =test_model.generate(**_lowerCAmelCase , max_new_tokens=_lowerCAmelCase )
tf.debugging.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ =1
UpperCAmelCase_ =2
class A ( tf.Module ):
def __init__( self: Optional[int] , _lowerCAmelCase: Any ) -> Optional[Any]:
'''simple docstring'''
super(_lowerCAmelCase , self ).__init__()
UpperCAmelCase_ =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.model.generate(
input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase , max_new_tokens=_lowerCAmelCase , return_dict_in_generate=_lowerCAmelCase , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ =[[2], [102, 103]]
UpperCAmelCase_ =[[1], [1, 1]]
UpperCAmelCase_ =DummyModel(model=_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCAmelCase , _lowerCAmelCase , signatures={"serving_default": dummy_model.serving} )
UpperCAmelCase_ =tf.saved_model.load(_lowerCAmelCase ).signatures["serving_default"]
for input_row in range(len(_lowerCAmelCase ) ):
UpperCAmelCase_ ={
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
UpperCAmelCase_ =serving_func(**_lowerCAmelCase )["sequences"]
UpperCAmelCase_ =test_model.generate(**_lowerCAmelCase , max_new_tokens=_lowerCAmelCase )
tf.debugging.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
@slow
@require_tensorflow_text
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=_lowerCAmelCase )
class A ( tf.keras.layers.Layer ):
def __init__( self: List[str] ) -> List[str]:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_lowerCAmelCase , "spiece.model" ) , "rb" ).read() )
UpperCAmelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Optional[int] , *_lowerCAmelCase: List[str] , **_lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =text.pad_model_inputs(
_lowerCAmelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
UpperCAmelCase_ =self.model.generate(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
return self.tokenizer.detokenize(_lowerCAmelCase )
UpperCAmelCase_ =CompleteSentenceTransformer()
UpperCAmelCase_ =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
UpperCAmelCase_ =complete_model(_lowerCAmelCase )
UpperCAmelCase_ =tf.keras.Model(_lowerCAmelCase , _lowerCAmelCase )
keras_model.save(_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ={
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
UpperCAmelCase_ =14
UpperCAmelCase_ =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ ="Hello, my dog is cute and"
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , return_tensors="tf" )
UpperCAmelCase_ =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase_ =638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
UpperCAmelCase_ =model.generate(**_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
UpperCAmelCase_ =[638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
UpperCAmelCase_ =model.generate(**_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
UpperCAmelCase_ ="Hugging Face is a technology company based in New York and Paris."
UpperCAmelCase_ =bart_tokenizer(_lowerCAmelCase , return_tensors="tf" ).input_ids
UpperCAmelCase_ =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
UpperCAmelCase_ =bart_model.generate(_lowerCAmelCase ).numpy()
class A ( __lowercase ):
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Union[str, Any]=None , **_lowerCAmelCase: List[Any] ) -> str:
'''simple docstring'''
return super().call(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ =FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
UpperCAmelCase_ =bart_model.generate(_lowerCAmelCase , foo="bar" ).numpy()
self.assertTrue(np.array_equal(_lowerCAmelCase , _lowerCAmelCase ) )
class A ( bart_model.model.encoder.__class__ ):
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any , **_lowerCAmelCase: Tuple ) -> int:
'''simple docstring'''
return super().call(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ =FakeEncoder(bart_model.config , bart_model.model.shared )
UpperCAmelCase_ =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
UpperCAmelCase_ =bart_model.generate(_lowerCAmelCase ).numpy()
with self.assertRaises(_lowerCAmelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_lowerCAmelCase , foo="bar" )
| 54
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase ( __snake_case : List[str] , __snake_case : Any=False ):
lowercase_ : List[str] = OmegaConf.load(__snake_case )
if display:
print(yaml.dump(OmegaConf.to_container(__snake_case ) ) )
return config
def lowercase ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : Optional[Any]=None ):
if conf_path is None:
lowercase_ : str = '''./model_checkpoints/vqgan_only.yaml'''
lowercase_ : str = load_config(__snake_case , display=__snake_case )
lowercase_ : Optional[int] = VQModel(**config.model.params )
if ckpt_path is None:
lowercase_ : List[str] = '''./model_checkpoints/vqgan_only.pt'''
lowercase_ : Optional[Any] = torch.load(__snake_case , map_location=__snake_case )
if ".ckpt" in ckpt_path:
lowercase_ : List[Any] = sd['''state_dict''']
model.load_state_dict(__snake_case , strict=__snake_case )
model.to(__snake_case )
del sd
return model
def lowercase ( __snake_case : Tuple , __snake_case : int ):
lowercase_ , lowercase_ , lowercase_ : List[Any] = model.encode(__snake_case )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
lowercase_ : Optional[int] = model.decode(__snake_case )
return xrec
def lowercase ( __snake_case : Any , __snake_case : List[str]=False ):
lowercase_ , lowercase_ : Optional[Any] = string.rsplit('''.''' , 1 )
if reload:
lowercase_ : Union[str, Any] = importlib.import_module(__snake_case )
importlib.reload(__snake_case )
return getattr(importlib.import_module(__snake_case , package=__snake_case ) , cls )
def lowercase ( __snake_case : List[Any] ):
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def lowercase ( __snake_case : Any , __snake_case : List[str] , __snake_case : Tuple=True , __snake_case : Dict=True ):
lowercase_ : str = instantiate_from_config(__snake_case )
if sd is not None:
model.load_state_dict(__snake_case )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase ( __snake_case : int , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : str ):
# load the specified checkpoint
if ckpt:
lowercase_ : Optional[Any] = torch.load(__snake_case , map_location='''cpu''' )
lowercase_ : Any = pl_sd['''global_step''']
print(F'''loaded model from global step {global_step}.''' )
else:
lowercase_ : Optional[Any] = {'''state_dict''': None}
lowercase_ : Dict = None
lowercase_ : Union[str, Any] = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=__snake_case , eval_mode=__snake_case )['''model''']
return model, global_step
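# Usage sketch (assumption: the helpers above keep the names their own bodies
# reference -- load_config, instantiate_from_config, load_model_from_config):
#
#     config = load_config("./model_checkpoints/vqgan_only.yaml", display=True)
#     model = instantiate_from_config(config.model)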
| 231
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a =' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
lowerCamelCase__ =tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCamelCase__ =self.diffusers_dir
shutil.copy(
os.path.join(_lowerCamelCase , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def _a ( self ):
lowerCamelCase__ ="src/diffusers"
shutil.rmtree(self.diffusers_dir )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
lowerCamelCase__ =comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowerCamelCase__ =comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowerCamelCase__ =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCamelCase__ =black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
lowerCamelCase__ =os.path.join(self.diffusers_dir , "new_code.py" )
with open(_lowerCamelCase , "w" , newline="\n" ) as f:
f.write(_lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowerCamelCase )
with open(_lowerCamelCase , "r" ) as f:
                self.assertEqual(f.read() , _lowerCamelCase )
def _a ( self ):
lowerCamelCase__ =check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , _lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , _lowerCamelCase ) , )
# Copy consistency with a really long name
lowerCamelCase__ ="TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , _lowerCamelCase , _lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , _lowerCamelCase , overwrite_result=re.sub("DDPM" , "Test" , _lowerCamelCase ) , )
| 132
|
"""simple docstring"""
import sys
def lowerCamelCase_ ( __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ =len(__lowerCAmelCase )
lowerCamelCase__ =[[0 for x in range(__lowerCAmelCase )] for x in range(__lowerCAmelCase )]
lowerCamelCase__ =[[0 for x in range(__lowerCAmelCase )] for x in range(__lowerCAmelCase )]
for chain_length in range(2 , __lowerCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
lowerCamelCase__ =a + chain_length - 1
lowerCamelCase__ =sys.maxsize
for c in range(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ =(
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowerCamelCase__ =cost
lowerCamelCase__ =c
return matrix, sol
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if i == j:
print("A" + str(__lowerCAmelCase ) , end=" " )
else:
print("(" , end=" " )
        print_optimal_solution(__lowerCAmelCase , __lowerCAmelCase , optimal_solution[i][j] )
        print_optimal_solution(__lowerCAmelCase , optimal_solution[i][j] + 1 , __lowerCAmelCase )
print(")" , end=" " )
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ =[30, 35, 15, 5, 10, 20, 25]
lowerCamelCase__ =len(__lowerCAmelCase )
    # Sizes of the matrices created from the above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowerCamelCase__ , lowerCamelCase__ =matrix_chain_order(__lowerCAmelCase )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(__lowerCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
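# For the dimension list above ([30, 35, 15, 5, 10, 20, 25], i.e. matrices A1..A6),
# the classic textbook result is 15125 scalar multiplications with the
# parenthesization ((A1 (A2 A3)) ((A4 A5) A6)) -- a handy check on matrix[1][n - 1].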
| 132
| 1
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
lowercase : List[str] = 8
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=BITS ):
lowerCamelCase_: str = x.device
lowerCamelCase_: Dict = (x * 2_5_5).int().clamp(0 , 2_5_5 )
lowerCamelCase_: str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase )
lowerCamelCase_: Optional[int] = rearrange(_UpperCAmelCase , """d -> d 1 1""" )
lowerCamelCase_: Tuple = rearrange(_UpperCAmelCase , """b c h w -> b c 1 h w""" )
lowerCamelCase_: Any = ((x & mask) != 0).float()
lowerCamelCase_: Union[str, Any] = rearrange(_UpperCAmelCase , """b c d h w -> b (c d) h w""" )
lowerCamelCase_: Any = bits * 2 - 1
return bits
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=BITS ):
lowerCamelCase_: str = x.device
lowerCamelCase_: List[Any] = (x > 0).int()
lowerCamelCase_: str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase , dtype=torch.intaa )
lowerCamelCase_: int = rearrange(_UpperCAmelCase , """d -> d 1 1""" )
lowerCamelCase_: List[Any] = rearrange(_UpperCAmelCase , """b (c d) h w -> b c d h w""" , d=8 )
lowerCamelCase_: Any = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
return (dec / 2_5_5).clamp(0.0 , 1.0 )
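# Round-trip sketch (assumption: the two helpers above are bound to the names the
# pipeline below calls, decimal_to_bits / bits_to_decimal, with the default of 8
# bits per channel):
#
#     x = torch.rand(2, 3, 8, 8)      # images in [0, 1]
#     b = decimal_to_bits(x)          # (2, 3 * 8, 8, 8), values in {-1., +1.}
#     x_rec = bits_to_decimal(b)      # (2, 3, 8, 8); equals x up to 8-bit quantization:
#     assert torch.allclose(x_rec, (x * 255).int().clamp(0, 255) / 255)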
def UpperCAmelCase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0 , _UpperCAmelCase = True , _UpperCAmelCase=None , _UpperCAmelCase = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-depth understanding
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
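    # Restating formula (12) with that notation (the closed form the steps below
    # implement):
    #   x_{t-1} = √α_{t-1} · x_0 + √(1 - α_{t-1} - σ_t²) · e_θ(x_t, t) + σ_t · z,   z ~ N(0, I)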
# 1. get previous step value (=t-1)
lowerCamelCase_: List[str] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowerCamelCase_: Optional[Any] = self.alphas_cumprod[timestep]
lowerCamelCase_: int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowerCamelCase_: int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_: Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowerCamelCase_: Any = self.bit_scale
if self.config.clip_sample:
lowerCamelCase_: Dict = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowerCamelCase_: List[Any] = self._get_variance(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_: Dict = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowerCamelCase_: int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_: Dict = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_: Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowerCamelCase_: List[str] = model_output.device if torch.is_tensor(_UpperCAmelCase ) else """cpu"""
lowerCamelCase_: Tuple = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_UpperCAmelCase ).to(_UpperCAmelCase )
lowerCamelCase_: str = self._get_variance(_UpperCAmelCase , _UpperCAmelCase ) ** 0.5 * eta * noise
lowerCamelCase_: Optional[int] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
def UpperCAmelCase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="epsilon" , _UpperCAmelCase=None , _UpperCAmelCase = True , ):
lowerCamelCase_: Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCamelCase_ , lowerCamelCase_: str = torch.split(_UpperCAmelCase , sample.shape[1] , dim=1 )
else:
lowerCamelCase_: Optional[Any] = None
# 1. compute alphas, betas
lowerCamelCase_: Dict = self.alphas_cumprod[t]
lowerCamelCase_: Dict = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCamelCase_: Dict = 1 - alpha_prod_t
lowerCamelCase_: List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCamelCase_: Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCamelCase_: str = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
lowerCamelCase_: Optional[int] = self.bit_scale
if self.config.clip_sample:
lowerCamelCase_: Dict = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_: int = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCamelCase_: int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_: Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
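    # Restated in closed form, formula (7) reads:
    #   μ̃_t(x_t, x_0) = (√ᾱ_{t-1} · β_t / (1 - ᾱ_t)) · x_0 + (√α_t · (1 - ᾱ_{t-1}) / (1 - ᾱ_t)) · x_t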
# 6. Add noise
lowerCamelCase_: int = 0
if t > 0:
lowerCamelCase_: Optional[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_UpperCAmelCase ).to(model_output.device )
lowerCamelCase_: str = (self._get_variance(_UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
lowerCamelCase_: Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
class a__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self : str , A_ : UNetaDConditionModel , A_ : Union[DDIMScheduler, DDPMScheduler] , A_ : Optional[float] = 1.0 , ) -> Any:
"""simple docstring"""
super().__init__()
lowerCamelCase_: Union[str, Any] = bit_scale
lowerCamelCase_: Optional[Any] = (
ddim_bit_scheduler_step if isinstance(A_ , A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
def __call__( self : Optional[Any] , A_ : Optional[int] = 2_56 , A_ : Optional[int] = 2_56 , A_ : Optional[int] = 50 , A_ : Optional[torch.Generator] = None , A_ : Optional[int] = 1 , A_ : Optional[str] = "pil" , A_ : bool = True , **A_ : Tuple , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
lowerCamelCase_: str = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=A_ , )
lowerCamelCase_: List[Any] = decimal_to_bits(A_ ) * self.bit_scale
lowerCamelCase_: str = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCamelCase_: List[str] = self.unet(A_ , A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_: List[str] = self.scheduler.step(A_ , A_ , A_ ).prev_sample
lowerCamelCase_: Optional[int] = bits_to_decimal(A_ )
if output_type == "pil":
lowerCamelCase_: Tuple = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
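# Usage sketch (assumptions: the pipeline class above is exported as
# ``BitDiffusionPipeline`` and ``unet`` comes from a trained bit-diffusion
# checkpoint; both names are illustrative):
#
#     pipe = BitDiffusionPipeline(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#     images = pipe(height=256, width=256, num_inference_steps=50).images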
| 423
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=None , _UpperCAmelCase="no" , _UpperCAmelCase="29500" ):
lowerCamelCase_: List[str] = False
lowerCamelCase_: Dict = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
lowerCamelCase_: Dict = True
elif "IPython" in sys.modules:
lowerCamelCase_: Dict = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
lowerCamelCase_: str = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , _UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
lowerCamelCase_: Optional[Any] = 8
lowerCamelCase_: List[Any] = PrepareForLaunch(_UpperCAmelCase , distributed_type="""TPU""" )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*_UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
        # torch.distributed will expect a few environment variables to be set. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
        world_size=_UpperCAmelCase , master_addr="""127.0.0.1""" , master_port=_UpperCAmelCase , mixed_precision=_UpperCAmelCase ):
lowerCamelCase_: Tuple = PrepareForLaunch(_UpperCAmelCase , distributed_type="""MULTI_GPU""" )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase_: List[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be set. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
        world_size=_UpperCAmelCase , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
lowerCamelCase_: Optional[Any] = PrepareForLaunch(_UpperCAmelCase , debug=_UpperCAmelCase )
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method="""fork""" )
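# Usage sketch (assumption: called from a Jupyter notebook; ``training_loop`` is a
# user-defined function taking the given args):
#
#     from accelerate import notebook_launcher
#     notebook_launcher(training_loop, args=(model, train_dataloader), num_processes=2)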
| 423
| 1
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[int] = WavaVecaPhonemeCTCTokenizer
_UpperCamelCase: str = False
def _snake_case ( self ) -> int:
super().setUp()
lowerCAmelCase : int = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
lowerCAmelCase : List[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowerCAmelCase : Dict = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase_ ) + """\n""" )
def _snake_case ( self , lowercase_ , lowercase_=False , lowercase_=20 , lowercase_=5 ) -> Tuple[str, list]:
lowerCAmelCase : Optional[int] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase_ )) for i in range(len(lowercase_ ) )]
lowerCAmelCase : List[str] = list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase_ ) , lowercase_ ) )
if max_length is not None and len(lowercase_ ) > max_length:
lowerCAmelCase : Union[str, Any] = toks[:max_length]
if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0:
while len(lowercase_ ) < min_length:
lowerCAmelCase : Any = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase : List[Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase : List[Any] = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ )
if " " not in output_txt and len(lowercase_ ) > 1:
lowerCAmelCase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase_ )
)
if with_prefix_space:
lowerCAmelCase : str = """ """ + output_txt
lowerCAmelCase : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
return output_txt, output_ids
def _snake_case ( self , **lowercase_ ) -> int:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
lowerCAmelCase : Dict = tokenizer("""m xxx ɪ""" , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
lowerCAmelCase : List[Any] = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase : Tuple = tokenizer("""maɪ c""" , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [3, 200] ) # mai should be <unk> (=3)
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
lowerCAmelCase : List[str] = """Hello how are you"""
lowerCAmelCase : int = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
lowerCAmelCase : int = """Hello how are you"""
lowerCAmelCase : Any = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def _snake_case ( self ) -> Any:
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
lowerCAmelCase : Dict = """Hello how are you"""
lowerCAmelCase : Optional[Any] = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
lowerCAmelCase : List[str] = tokenizer.decode(tokenizer(lowercase_ ).input_ids )
self.assertEqual(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
lowerCAmelCase : str = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase : Any = tokenizer.decode(sample_ids[0] )
lowerCAmelCase : int = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
lowerCAmelCase : Union[str, Any] = """Hello how are you"""
lowerCAmelCase : str = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowercase_ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def _snake_case ( self ) -> Any:
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
lowerCAmelCase : int = """Hello how are you"""
lowerCAmelCase : Optional[int] = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
lowerCAmelCase : str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase : str = tokenizer.decode(sample_ids[0] )
lowerCAmelCase : Tuple = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
lowerCAmelCase : List[str] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase_ )
lowerCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase_ , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
lowerCAmelCase : str = """Hello how are you"""
lowerCAmelCase : List[Any] = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
lowerCAmelCase : Tuple = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
lowerCAmelCase : List[Any] = """Hello how are you"""
lowerCAmelCase : Union[str, Any] = tokenizer.phonemize(lowercase_ , phonemizer_lang="""en-us""" )
lowerCAmelCase : Any = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowercase_ )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowercase_ )
lowerCAmelCase : Any = """Hello how are you"""
lowerCAmelCase : Tuple = tokenizer(lowercase_ , phonemizer_lang="""en-us""" ).input_ids
lowerCAmelCase : List[str] = tokenizer(lowercase_ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowercase_ , lowercase_ )
lowerCAmelCase : Optional[Any] = tokenizer.decode(lowercase_ )
lowerCAmelCase : Tuple = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowercase_ , """ɛ l o h aʊ a ʁ j u""" )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
lowerCAmelCase : Optional[int] = """Hello how Are you"""
lowerCAmelCase : List[str] = """hello how are you"""
lowerCAmelCase : Optional[Any] = tokenizer(lowercase_ ).input_ids
lowerCAmelCase : Optional[int] = tokenizer(lowercase_ ).input_ids
self.assertEqual(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
lowerCAmelCase : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> int:
lowerCAmelCase : Tuple = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self ) -> Any:
lowerCAmelCase : Any = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowerCAmelCase : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCAmelCase : Optional[Any] = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Optional[int] = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowercase_ , lowercase_ ):
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase_ ) )
# transform list to ModelOutput
lowerCAmelCase : str = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowercase_ , lowercase_ ):
if isinstance(lowercase_ , lowercase_ ):
[recursive_check(lowercase_ , lowercase_ ) for la, la in zip(lowercase_ , lowercase_ )]
self.assertEqual(lowercase_ , lowercase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
lowerCAmelCase : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
        # We assume that `decode` works as expected. All we check now is that
        # the output type is correct and that the output is identical to `decode`.
# char
lowerCAmelCase : List[Any] = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ )
lowerCAmelCase : List[str] = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids]
check_list_tuples_equal(lowercase_ , lowercase_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def _snake_case ( self ) -> Dict:
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def _snake_case ( self ) -> Union[str, Any]:
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def _snake_case ( self ) -> int:
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def _snake_case ( self ) -> str:
pass
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase : Any = tokenizer.vocab_size
lowerCAmelCase : List[str] = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase : Dict = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
lowerCAmelCase : str = tokenizer.add_tokens(lowercase_ )
lowerCAmelCase : Union[str, Any] = tokenizer.vocab_size
lowerCAmelCase : Optional[int] = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size + len(lowercase_ ) )
lowerCAmelCase : Tuple = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase : int = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
lowerCAmelCase : Optional[Any] = tokenizer.add_special_tokens(lowercase_ )
lowerCAmelCase : Optional[int] = tokenizer.vocab_size
lowerCAmelCase : Any = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size_a + len(lowercase_ ) )
lowerCAmelCase : Any = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def _snake_case ( self ) -> List[str]:
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def _snake_case ( self ) -> Tuple:
pass
def _snake_case ( self ) -> int:
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string, which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
lowerCAmelCase : Optional[Any] = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase : Optional[Any] = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
lowerCAmelCase : Any = tokenizer.convert_tokens_to_string(lowercase_ )
self.assertIsInstance(output["""text"""] , lowercase_ )
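# Usage sketch of the tokenizer class under test (assumption: the ``phonemizer``
# backend is installed; the expected output is taken from the tests above):
#
#     tokenizer = WavaVecaPhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
#     tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
#     # -> "h ə l oʊ h aʊ ɑːɹ j uː"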
| 693
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in a directory."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a GitHub-flavored Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
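
# Usage sketch: assuming the script above is saved as `get_ci_error_statistics.py`
# (the file name is an assumption), it could be invoked like this:
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_results \
#       --token <GitHub token with actions:read permission>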
| 693
| 1
|
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
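
# A variation (a sketch, not from the snippet above): on a machine without a
# CUDA device the same pipeline can run on CPU in full precision.
import torch
from diffusers import StableDiffusionPipeline

pipe_cpu = StableDiffusionPipeline.from_pretrained("path-to-your-trained-model", torch_dtype=torch.float32)
cpu_image = pipe_cpu("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
cpu_image.save("dog-bucket-cpu.png")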
| 334
|
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    def test_padding_different_model_input_name(self):
        # the tokenizer created in `setUp` has no padding token
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
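
# Illustration (a sketch, assuming the public "gpt2" checkpoint is reachable):
# the `add_prefix_space` behaviour exercised above changes how the first word
# is byte-level BPE encoded.
from transformers import GPT2Tokenizer

gpt2_tok = GPT2Tokenizer.from_pretrained("gpt2")
print(gpt2_tok.tokenize("lower newer"))                         # ['lower', 'Ġnewer']
print(gpt2_tok.tokenize("lower newer", add_prefix_space=True))  # ['Ġlower', 'Ġnewer']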
| 162
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
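
# Usage note (a sketch): with the `_LazyModule` registration above, end users
# import from the package as usual, and submodules are only loaded on first
# attribute access, e.g.:
#
#   from transformers import YolosForObjectDetection  # triggers the lazy import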
| 578
|
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
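
# Usage sketch (the checkpoint name and image URL are assumptions, not part of
# this file): the pipeline factory wires the class above to a checkpoint.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]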
| 578
| 1
|
import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
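
# Usage sketch (the script file name is an assumption):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-converted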
| 47
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCamelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching, '''os.path.join''', __snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
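
# Illustration (a sketch, not part of the test file above): `patch_submodule`
# is a context manager, and every alias of the target inside the module is
# swapped for the mock, then restored on exit.
import os

from datasets.utils.patching import patch_submodule

from . import _test_patching

mock = object()
with patch_submodule(_test_patching, "os.path.join", mock):
    assert _test_patching.os.path.join is mock  # every alias is patched
assert _test_patching.os.path.join is os.path.join  # restored afterwards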
| 19
| 0
|
"""simple docstring"""
import torch
def a__ ( ) -> Dict:
if torch.cuda.is_available():
_A = torch.cuda.device_count()
else:
_A = 0
print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 713
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """A helper function for verifying the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
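
# Launch sketch (the file name is an assumption): the checks above expect two
# processes, so the script would typically be run with:
#
#   accelerate launch --num_processes 2 test_even_batches.py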
| 621
| 0
|