"""Image processor (reconstructed from a name-mangled source; identifiers are
restored from how they are used below)."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE: the original class name was lost to identifier mangling in this dump;
# a generic name is used for the reconstruction.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
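
# Usage sketch (not part of the original module): running the preprocessing
# pipeline above on a random HWC uint8 image. `ImageProcessor` is the
# reconstructed name used in this excerpt; the real class carries a
# model-specific name, so adjust the import accordingly.
#
#     import numpy as np
#
#     processor = ImageProcessor()
#     image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop
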
"""Testing utilities: environment flags, skip decorators, offline simulation,
and async subprocess helpers (reconstructed from a name-mangled source;
identifiers are restored from how they are used below)."""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
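
# Usage sketch (not part of the original module): how the decorators and the
# `offline` context manager above are typically combined in a test, assuming
# this module is importable as `tests.utils`.
#
#     from tests.utils import OfflineSimulationMode, offline, require_torch, slow
#
#     @slow
#     @require_torch
#     def test_download_fails_offline():
#         with offline(OfflineSimulationMode.CONNECTION_FAILS):
#             ...  # any requests.Session.send call now raises ConnectionError
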
"""Dataset helper classes for GLUE, language modeling, and SQuAD (re-exports)."""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
"""Script that sorts the entries of the `_import_structure` dictionaries in the
Transformers `__init__.py` files (reconstructed from a name-mangled source;
identifiers are restored from how they are used below)."""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function so that sorting is case-insensitive and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following isort rules: constants, then classes, then functions."""

    # If no key is provided, we use the identity.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the imported objects sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # accumulate every failing init rather than keeping only the last one
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
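
# Usage sketch (not part of the original script): `sort_objects_in_import` on a
# single flattened import line, showing the ordering rule (constants, then
# classes, then functions, each group sorted ignoring case and underscores).
#
#     >>> sort_objects_in_import('_import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_tf"]')
#     '_import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_tf"]'
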
"""AutoencoderKL variational autoencoder with slicing and tiling support
(reconstructed from a name-mangled source; identifiers are restored from how
they are used below)."""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the encoding method, holding the posterior distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled VAE processing: large images are handled in overlapping tiles to save memory."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled VAE processing."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Enable sliced VAE processing: batches are handled one sample at a time to save memory."""
        self.use_slicing = True

    def disable_slicing(self):
        """Disable sliced VAE processing."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
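
# Usage sketch (not part of the original module): round-tripping a random batch
# through the VAE above, assuming a diffusers install where `AutoencoderKL` is
# importable. With a single block there is no spatial downsampling.
#
#     import torch
#     from diffusers import AutoencoderKL
#
#     vae = AutoencoderKL(block_out_channels=(64,), latent_channels=4)
#     x = torch.randn(1, 3, 64, 64)
#     posterior = vae.encode(x).latent_dist
#     recon = vae.decode(posterior.mode()).sample  # same spatial size as x here
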
"""ONNX Runtime model wrapper (reconstructed from a name-mangled source; the
mangled numpy dtype names are restored to the standard ORT-to-numpy mapping)."""
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX inference session with a given provider (CPUExecutionProvider by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Save the model and its weights to `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = str(model_id).split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
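
# Usage sketch (not part of the original module): loading an exported ONNX
# component and running it. The repo id and input name below are hypothetical
# placeholders; `__call__` returns the raw list of session outputs.
#
#     import numpy as np
#
#     model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")
#     outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
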
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __magic_name__ ( __UpperCAmelCase ):
__A : List[Any] = "retribert"
def __init__( self : Dict , snake_case__ : Union[str, Any]=3_0_5_2_2 , snake_case__ : Union[str, Any]=7_6_8 , snake_case__ : Optional[Any]=8 , snake_case__ : int=1_2 , snake_case__ : Optional[int]=3_0_7_2 , snake_case__ : Any="gelu" , snake_case__ : str=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=0.02 , snake_case__ : Tuple=1e-1_2 , snake_case__ : Any=True , snake_case__ : Tuple=1_2_8 , snake_case__ : Optional[int]=0 , **snake_case__ : List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
lowercase :Any = vocab_size
lowercase :Optional[Any] = hidden_size
lowercase :str = num_hidden_layers
lowercase :List[str] = num_attention_heads
lowercase :Union[str, Any] = hidden_act
lowercase :Any = intermediate_size
lowercase :str = hidden_dropout_prob
lowercase :str = attention_probs_dropout_prob
lowercase :Optional[Any] = max_position_embeddings
lowercase :Union[str, Any] = type_vocab_size
lowercase :Any = initializer_range
lowercase :int = layer_norm_eps
lowercase :List[str] = share_encoders
lowercase :Union[str, Any] = projection_dim
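
# Usage sketch (not part of the original module): instantiating the config with
# one overridden field.
#
#     config = RetriBertConfig(projection_dim=256)
#     assert config.model_type == "retribert"
#     assert config.projection_dim == 256
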
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __magic_name__ ( __UpperCAmelCase ):
__A : torch.FloatTensor
__A : Optional[torch.FloatTensor] = None
def lowerCamelCase (a_ :List[Any] , a_ :List[str]=0.9_99 , a_ :List[Any]="cosine" , ) -> str:
if alpha_transform_type == "cosine":
def alpha_bar_fn(a_ :Union[str, Any]):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a_ :Tuple):
return math.exp(t * -12.0)
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""")
lowercase :str = []
for i in range(a_):
lowercase :Optional[Any] = i / num_diffusion_timesteps
lowercase :Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_) / alpha_bar_fn(a_) , a_))
return torch.tensor(a_ , dtype=torch.floataa)
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ):
__A : Tuple = 1
@register_to_config
def __init__( self : Union[str, Any] , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.00_01 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[Union[np.ndarray, List[float]]] = None , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : int = 0 , snake_case__ : str = "epsilon" , snake_case__ : float = 1.0 , **snake_case__ : Union[str, Any] , ):
'''simple docstring'''
if kwargs.get('''set_alpha_to_one''' , snake_case__ ) is not None:
lowercase :Any = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :str = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
lowercase :Any = torch.tensor(snake_case__ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase :Optional[Any] = torch.linspace(snake_case__ , snake_case__ , snake_case__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase :Tuple = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase :Dict = betas_for_alpha_bar(snake_case__ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowercase :int = 1.0 - self.betas
lowercase :int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowercase :Tuple = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowercase :Any = 1.0
# setable values
lowercase :Dict = None
lowercase :int = torch.from_numpy(np.arange(0 , snake_case__ ).copy().astype(np.intaa ) )
def __snake_case ( self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __snake_case ( self : Dict , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ):
'''simple docstring'''
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
f""" maximal {self.config.num_train_timesteps} timesteps.""" )
lowercase :Any = num_inference_steps
lowercase :Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase :str = (np.arange(0 , snake_case__ ) * step_ratio).round().copy().astype(np.intaa )
lowercase :Any = torch.from_numpy(snake_case__ ).to(snake_case__ )
self.timesteps += self.config.steps_offset
def __snake_case ( self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = False , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : bool = True , ):
'''simple docstring'''
lowercase :Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowercase :List[Any] = self.alphas_cumprod[timestep]
lowercase :List[str] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowercase :Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowercase :Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowercase :str = model_output
elif self.config.prediction_type == "sample":
lowercase :List[Any] = model_output
lowercase :List[str] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowercase :Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowercase :Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowercase :Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase :List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase :Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def __len__( self : Tuple ):
'''simple docstring'''
return self.config.num_train_timesteps
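
# Usage sketch (not part of the original module): building the cosine beta
# schedule above and configuring the inverse scheduler for 50 steps, assuming a
# diffusers install where `DDIMInverseScheduler` is importable.
#
#     from diffusers import DDIMInverseScheduler
#
#     betas = betas_for_alpha_bar(1000)          # shape (1000,), monotone noise schedule
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     # timesteps run forward (0, 20, 40, ...) since this scheduler inverts DDIM
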
"""Convert PoolFormer checkpoints from the original repository to the
Transformers format (reconstructed from a name-mangled source; identifiers are
restored from how they are used below)."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Fetch a COCO image to verify the converted model on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
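
# Usage sketch (not part of the original script): a typical invocation. The
# script filename and local checkpoint path below are hypothetical.
#
#     python convert_poolformer_checkpoint.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path ./poolformer_s12.pth.tar \
#         --pytorch_dump_folder_path ./poolformer_s12_hf
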
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
            # below check should work but currently it isn't working (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
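# Hedged usage sketch (assumption, not part of the upstream module): the four
# helpers above are normally driven by an `Accelerator` during checkpointing;
# `accelerator`, `model`, `optimizer`, and the directory below are placeholders.
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")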
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
lowerCAmelCase = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(A_ )
from datasets import load_dataset
lowerCAmelCase = load_dataset("""nielsr/rvlcdip-demo""" )
lowerCAmelCase = dataset["""train"""][0]["""image"""].convert("""RGB""" )
lowerCAmelCase = image_processor(A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**A_ )
lowerCAmelCase = outputs.logits
lowerCAmelCase = torch.Size((1, 16) )
self.assertEqual(logits.shape , A_ )
lowerCAmelCase = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=A_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __snake_case ( self ) -> int:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> int:
lowerCAmelCase = LiltModel(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ )
lowerCAmelCase = model(A_ , bbox=A_ , token_type_ids=A_ )
lowerCAmelCase = model(A_ , bbox=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Union[str, Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LiltForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(
A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
lowerCAmelCase = LiltForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(
A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Union[str, Any] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Dict = False
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
return True
def __snake_case ( self ) -> int:
lowerCAmelCase = LiltModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __snake_case ( self ) -> Any:
self.config_tester.run_common_tests()
def __snake_case ( self ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __snake_case ( self ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*A_ )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
def __snake_case ( self ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
@slow
def __snake_case ( self ) -> Union[str, Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = LiltModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@slow
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(A_ )
lowerCAmelCase = torch.tensor([[1, 2]] , device=A_ )
lowerCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(input_ids=A_ , bbox=A_ )
lowerCAmelCase = torch.Size([1, 2, 768] )
lowerCAmelCase = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=A_ , )
self.assertTrue(outputs.last_hidden_state.shape , A_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
"""simple docstring"""
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    return 2 * x
def get_initial_point(a: float) -> float:
    # start from a power of two that overshoots a, so Newton's steps descend
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("""math domain error""")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
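    # Hedged usage example (added for illustration): Newton's iteration converges
    # quadratically, so even sqrt(2) settles within a few steps.
    print(square_root_iterative(2.0))  # ~1.4142135623730951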
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = BioGptTokenizer
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCAmelCase__ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
UpperCAmelCase__ : Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCamelCase ) )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """lower newer"""
UpperCAmelCase__ : int = """lower newer"""
return input_text, output_text
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[str] = """lower"""
UpperCAmelCase__ : Optional[Any] = ["""low""", """er</w>"""]
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = tokens + ["""<unk>"""]
UpperCAmelCase__ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
'''simple docstring'''
UpperCAmelCase_ : int = 384
UpperCAmelCase_ : List[str] = 7
if "tiny" in model_name:
UpperCAmelCase_ : int = 96
UpperCAmelCase_ : Any = (2, 2, 6, 2)
UpperCAmelCase_ : Union[str, Any] = (3, 6, 12, 24)
elif "small" in model_name:
UpperCAmelCase_ : Optional[int] = 96
UpperCAmelCase_ : Optional[Any] = (2, 2, 18, 2)
UpperCAmelCase_ : Dict = (3, 6, 12, 24)
elif "base" in model_name:
UpperCAmelCase_ : Optional[int] = 128
UpperCAmelCase_ : List[Any] = (2, 2, 18, 2)
UpperCAmelCase_ : int = (4, 8, 16, 32)
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : Any = 512
elif "large" in model_name:
UpperCAmelCase_ : str = 192
UpperCAmelCase_ : Dict = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (6, 12, 24, 48)
UpperCAmelCase_ : Optional[int] = 12
UpperCAmelCase_ : List[Any] = 768
# set label information
UpperCAmelCase_ : Dict = 150
UpperCAmelCase_ : str = """huggingface/label-files"""
UpperCAmelCase_ : Optional[int] = """ade20k-id2label.json"""
UpperCAmelCase_ : str = json.load(open(hf_hub_download(_a , _a , repo_type="""dataset""" ) , """r""" ) )
    UpperCAmelCase_ : Dict = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Tuple = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[int] = SwinConfig(
embed_dim=_a , depths=_a , num_heads=_a , window_size=_a , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
UpperCAmelCase_ : List[Any] = UperNetConfig(
        backbone_config=_a , auxiliary_in_channels=_a , num_labels=_a , id2label=_a , label2id=_a , )
return config
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order( x ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
'''simple docstring'''
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
UpperCAmelCase_ : Dict = model_name_to_url[model_name]
UpperCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(_a , map_location="""cpu""" , file_name=_a )[
"""state_dict"""
]
for name, param in state_dict.items():
        print(name , param.shape )
UpperCAmelCase_ : str = get_upernet_config(_a )
UpperCAmelCase_ : Optional[Any] = UperNetForSemanticSegmentation(_a )
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
# rename keys
UpperCAmelCase_ : Any = create_rename_keys(_a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
UpperCAmelCase_ : Union[str, Any] = reverse_correct_unfold_reduction_order(_a )
if "norm" in key:
UpperCAmelCase_ : Union[str, Any] = reverse_correct_unfold_norm_order(_a )
model.load_state_dict(_a )
# verify on image
UpperCAmelCase_ : str = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
UpperCAmelCase_ : Tuple = Image.open(requests.get(_a , stream=_a ).raw ).convert("""RGB""" )
UpperCAmelCase_ : str = SegformerImageProcessor()
UpperCAmelCase_ : int = processor(_a , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
UpperCAmelCase_ : str = model(_a )
UpperCAmelCase_ : int = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
UpperCAmelCase_ : List[Any] = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
UpperCAmelCase_ : Dict = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
UpperCAmelCase_ : int = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
UpperCAmelCase_ : Optional[int] = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _a , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_a )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_a )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
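# Hedged usage note (assumption): with the lazy-module pattern above, an import
# such as `from transformers.models.mgp_str import MgpstrProcessor` only loads
# the heavy torch-backed submodules on first attribute access.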
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
def accuracy(out, labels):
    '''simple docstring'''
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    '''simple docstring'''
    with open(dataset_path, encoding="""utf_8""" ) as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        # each tensor dataset is (input_ids, mc_token_ids, lm_labels, mc_labels)
        # with shapes (n, 2, input_len), (n, 2), (n, 2, input_len), (n,)
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
'''simple docstring'''
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("""--model_name""", type=A_, default="""openai-gpt""", help="""pretrained model name""" )
parser.add_argument("""--do_train""", action="""store_true""", help="""Whether to run training.""" )
parser.add_argument("""--do_eval""", action="""store_true""", help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""", default=A_, type=A_, required=A_, help="""The output directory where the model predictions and checkpoints will be written.""", )
parser.add_argument("""--train_dataset""", type=A_, default="""""" )
parser.add_argument("""--eval_dataset""", type=A_, default="""""" )
parser.add_argument("""--seed""", type=A_, default=42 )
parser.add_argument("""--num_train_epochs""", type=A_, default=3 )
parser.add_argument("""--train_batch_size""", type=A_, default=8 )
parser.add_argument("""--eval_batch_size""", type=A_, default=16 )
parser.add_argument("""--adam_epsilon""", default=1e-8, type=A_, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", type=A_, default=1 )
parser.add_argument(
"""--max_steps""", default=-1, type=A_, help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
), )
parser.add_argument(
"""--gradient_accumulation_steps""", type=A_, default=1, help="""Number of updates steps to accumulate before performing a backward/update pass.""", )
parser.add_argument("""--learning_rate""", type=A_, default=6.25e-5 )
parser.add_argument("""--warmup_steps""", default=0, type=A_, help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""", type=A_, default="""warmup_linear""" )
parser.add_argument("""--weight_decay""", type=A_, default=0.01 )
parser.add_argument("""--lm_coef""", type=A_, default=0.9 )
parser.add_argument("""--n_valid""", type=A_, default=374 )
parser.add_argument("""--server_ip""", type=A_, default="""""", help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""", type=A_, default="""""", help="""Can be used for distant debugging.""" )
__magic_name__ = parser.parse_args()
print(A_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=A_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__magic_name__ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(A_, A_ ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__magic_name__ = ["""_start_""", """_delimiter_""", """_classify_"""]
__magic_name__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(A_ )
__magic_name__ = tokenizer.convert_tokens_to_ids(A_ )
__magic_name__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(A_ ) )
model.to(A_ )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
__magic_name__ = load_rocstories_dataset(args.train_dataset )
__magic_name__ = load_rocstories_dataset(args.eval_dataset )
__magic_name__ = (train_dataset, eval_dataset)
__magic_name__ = tokenize_and_encode(A_ )
# Compute the max input length for the Transformer
__magic_name__ = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ), len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
__magic_name__ = min(A_, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__magic_name__ = pre_process_datasets(A_, A_, A_, *A_ )
__magic_name__ , __magic_name__ = tensor_datasets[0], tensor_datasets[1]
__magic_name__ = TensorDataset(*A_ )
__magic_name__ = RandomSampler(A_ )
__magic_name__ = DataLoader(A_, sampler=A_, batch_size=args.train_batch_size )
__magic_name__ = TensorDataset(*A_ )
__magic_name__ = SequentialSampler(A_ )
__magic_name__ = DataLoader(A_, sampler=A_, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__magic_name__ = args.max_steps
__magic_name__ = args.max_steps // (len(A_ ) // args.gradient_accumulation_steps) + 1
else:
__magic_name__ = len(A_ ) // args.gradient_accumulation_steps * args.num_train_epochs
__magic_name__ = list(model.named_parameters() )
__magic_name__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
__magic_name__ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
__magic_name__ = AdamW(A_, lr=args.learning_rate, eps=args.adam_epsilon )
__magic_name__ = get_linear_schedule_with_warmup(
A_, num_warmup_steps=args.warmup_steps, num_training_steps=A_ )
if args.do_train:
__magic_name__ , __magic_name__ , __magic_name__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc="""Epoch""" ):
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = tqdm(A_, desc="""Training""" )
for step, batch in enumerate(A_ ):
__magic_name__ = tuple(t.to(A_ ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = batch
__magic_name__ = model(A_, mc_token_ids=A_, lm_labels=A_, mc_labels=A_ )
__magic_name__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__magic_name__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__magic_name__ = """Training loss: {:.2e} lr: {:.2e}""".format(A_, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__magic_name__ = model.module if hasattr(A_, """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__magic_name__ = os.path.join(args.output_dir, A_ )
__magic_name__ = os.path.join(args.output_dir, A_ )
torch.save(model_to_save.state_dict(), A_ )
model_to_save.config.to_json_file(A_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__magic_name__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__magic_name__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(A_ )
if args.do_eval:
model.eval()
__magic_name__ , __magic_name__ = 0, 0
__magic_name__ , __magic_name__ = 0, 0
for batch in tqdm(A_, desc="""Evaluating""" ):
__magic_name__ = tuple(t.to(A_ ) for t in batch )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = batch
with torch.no_grad():
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = model(
A_, mc_token_ids=A_, lm_labels=A_, mc_labels=A_ )
__magic_name__ = mc_logits.detach().cpu().numpy()
__magic_name__ = mc_labels.to("""cpu""" ).numpy()
__magic_name__ = accuracy(A_, A_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__magic_name__ = eval_loss / nb_eval_steps
__magic_name__ = eval_accuracy / nb_eval_examples
__magic_name__ = tr_loss / nb_tr_steps if args.do_train else None
__magic_name__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
__magic_name__ = os.path.join(args.output_dir, """eval_results.txt""" )
with open(A_, """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""", A_, str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__A = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any]) ->None:
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CTRLTokenizer
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__: Optional[Any] =["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowerCamelCase__: Optional[Any] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: Union[str, Any] =["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowerCamelCase__: Union[str, Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCAmelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : List[str] , **UpperCAmelCase_ : str) ->List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int ="adapt react readapt apt"
lowerCamelCase__: int ="adapt react readapt apt"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowerCamelCase__: Union[str, Any] ="adapt react readapt apt"
lowerCamelCase__: Tuple ="adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowerCamelCase__: Dict =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[str] =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data
def __iter__( self : Optional[int] ) -> Optional[int]:
for element in self.data:
yield element
def create_accelerator(even_batches: bool = True):
lowercase__ : int = Accelerator(even_batches=lowerCAmelCase_)
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
if iterable:
lowercase__ : Tuple = DummyIterableDataset(torch.as_tensor(range(lowerCAmelCase_)))
else:
lowercase__ : List[str] = TensorDataset(torch.as_tensor(range(lowerCAmelCase_)))
lowercase__ : List[Any] = DataLoader(lowerCAmelCase_ , batch_size=lowerCAmelCase_)
lowercase__ : Optional[int] = accelerator.prepare(lowerCAmelCase_)
return dl
def verify_dataloader_batch_sizes(accelerator: Accelerator, dataset_size: int, batch_size: int, process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int], ):
lowercase__ : Dict = create_dataloader(accelerator=lowerCAmelCase_ , dataset_size=lowerCAmelCase_ , batch_size=lowerCAmelCase_)
lowercase__ : Optional[int] = [len(batch[0]) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
lowercase__ : int = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowerCAmelCase_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowerCAmelCase_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches():
lowercase__ : Tuple = create_accelerator(even_batches=lowerCAmelCase_)
verify_dataloader_batch_sizes(
lowerCAmelCase_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowerCAmelCase_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs():
lowercase__ : int = create_accelerator(even_batches=lowerCAmelCase_)
lowercase__ : str = torch.nn.Linear(1 , 1)
lowercase__ : Optional[int] = accelerator.prepare(lowerCAmelCase_)
lowercase__ : Tuple = create_dataloader(lowerCAmelCase_ , dataset_size=3 , batch_size=1)
lowercase__ : int = []
with accelerator.join_uneven_inputs([ddp_model]):
for batch_idx, batch in enumerate(lowerCAmelCase_):
lowercase__ : Optional[int] = ddp_model(batch[0].float())
lowercase__ : str = output.sum()
loss.backward()
batch_idxs.append(lowerCAmelCase_)
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
with warnings.catch_warnings(record=lowerCAmelCase_) as w:
with accelerator.join_uneven_inputs([Mock()]):
pass
assert issubclass(w[-1].category , lowerCAmelCase_)
assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
lowercase__ : List[Any] = True
lowercase__ : List[str] = False
lowercase__ : Tuple = create_accelerator(even_batches=lowerCAmelCase_)
lowercase__ : int = torch.nn.Linear(1 , 1)
lowercase__ : Tuple = accelerator.prepare(lowerCAmelCase_)
lowercase__ : Optional[int] = create_dataloader(lowerCAmelCase_ , dataset_size=3 , batch_size=1)
lowercase__ : Union[str, Any] = create_dataloader(lowerCAmelCase_ , dataset_size=3 , batch_size=1)
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase_):
lowercase__ : Optional[Any] = train_dl.batch_sampler.even_batches
lowercase__ : Tuple = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
lowercase__ : str = True
lowercase__ : int = False
lowercase__ : Optional[Any] = create_accelerator(even_batches=lowerCAmelCase_)
lowercase__ : List[str] = torch.nn.Linear(1 , 1)
lowercase__ : Dict = accelerator.prepare(lowerCAmelCase_)
create_dataloader(lowerCAmelCase_ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase_)
lowercase__ : List[Any] = create_dataloader(lowerCAmelCase_ , dataset_size=3 , batch_size=1)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase_):
lowercase__ : Optional[int] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
lowercase__ : Optional[int] = create_accelerator()
lowercase__ : int = torch.nn.Linear(1 , 1)
lowercase__ : List[Any] = accelerator.prepare(lowerCAmelCase_)
create_dataloader(lowerCAmelCase_ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase_)
with warnings.catch_warnings(record=lowerCAmelCase_) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase_):
pass
assert issubclass(w[-1].category , lowerCAmelCase_)
assert "only supported for map-style datasets" in str(w[-1].message)
def main():
lowercase__ : Tuple = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes")
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled")
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs")
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs")
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types")
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning")
lowercase__ : Any = accelerator.state.distributed_type
lowercase__ : Union[str, Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowerCAmelCase_)
lowercase__ : Any = original_state
if __name__ == "__main__":
main()
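# Hedged usage note (assumption; the file name is hypothetical): the asserts
# above expect exactly two processes, so the script would be launched as
#   accelerate launch --num_processes 2 test_even_batches.py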
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE :str = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
SCREAMING_SNAKE_CASE :Optional[int] = {'''mobilebert-uncased''': 5_12}
SCREAMING_SNAKE_CASE :Dict = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
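# Usage sketch (downloads the tokenizer files from the Hugging Face Hub):
#
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tokenizer("Hello world!").input_ids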
| 159
| 0
|
"""simple docstring"""
def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
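# Sanity check (a sketch): the only Pythagorean triple with a + b + c == 1000
# is (200, 375, 425), since 200**2 + 375**2 == 425**2, so solution() should
# return 200 * 375 * 425 == 31875000.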
| 354
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 69
| 0
|
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 13
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_lowercase : Any =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
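# Usage sketch (assumes the `transformers` pipeline factory and a depth
# checkpoint such as "Intel/dpt-large" are available):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"]            # PIL.Image visualization
#   out["predicted_depth"]  # raw torch.Tensor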
| 170
| 0
|
"""simple docstring"""
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        """--config_file""" , default=default_json_config_file , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowerCAmelCase , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
    parser.set_defaults(func=default_config_command)
return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 367
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 166
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "dpt"
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=3_84 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[2, 5, 8, 11] , SCREAMING_SNAKE_CASE__="project" , SCREAMING_SNAKE_CASE__=[4, 2, 1, 0.5] , SCREAMING_SNAKE_CASE__=[96, 1_92, 3_84, 7_68] , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=-1 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.4 , SCREAMING_SNAKE_CASE__=2_55 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=[1, 10_24, 24, 24] , SCREAMING_SNAKE_CASE__=[0, 1] , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
SCREAMING_SNAKE_CASE__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = BitConfig(**SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
SCREAMING_SNAKE_CASE__ : str = BitConfig(**SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
SCREAMING_SNAKE_CASE__ : Dict = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ : List[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Any = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
SCREAMING_SNAKE_CASE__ : Dict = readout_type
SCREAMING_SNAKE_CASE__ : Optional[int] = reassemble_factors
SCREAMING_SNAKE_CASE__ : List[str] = neck_hidden_sizes
SCREAMING_SNAKE_CASE__ : Dict = fusion_hidden_size
SCREAMING_SNAKE_CASE__ : int = head_in_index
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : Tuple = use_auxiliary_head
SCREAMING_SNAKE_CASE__ : List[Any] = auxiliary_loss_weight
SCREAMING_SNAKE_CASE__ : Tuple = semantic_loss_ignore_index
SCREAMING_SNAKE_CASE__ : List[str] = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
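# Usage sketch (assumes the constructor above is restored to the upstream
# DPTConfig signature, where `is_hybrid=True` attaches a BiT backbone):
#
#   config = DPTConfig(is_hybrid=True)
#   config.backbone_config  # a BitConfig with a bottleneck BiT backbone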
| 25
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
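# Usage sketch (relies on the helpers defined above):
#
#   set_verbosity_info()           # library-wide log level -> INFO
#   logger = get_logger(__name__)  # child of the library root logger
#   disable_progress_bar()         # tqdm calls above now return EmptyTqdm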
| 25
| 1
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
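# Wiring sketch (hypothetical file path and tokenizer; shows how the pieces
# above are meant to fit together):
#
#   from torch.utils.data import DataLoader
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)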
| 353
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_1122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_1122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def a ( self ):
# fmt: off
snake_case_ = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
snake_case_ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 200
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "biogpt"
    def __init__(self, vocab_size=4_2384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 7
|
"""simple docstring"""
def binary_or(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
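# Quick check (a sketch): binary_or mirrors Python's built-in `|` on the
# binary string representations, e.g. binary_or(25, 32) == "0b111001",
# which equals bin(25 | 32) since 25 | 32 == 57.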
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263
| 0
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 252
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"Done. Image saved to disk as {file_name}.")
| 252
| 1
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
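# Quick check (a sketch): for the degree-1 curve through (1, 2) and (3, 5),
# BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) is the midpoint
# (2.0, 3.5), since the degree-1 basis at t=0.5 weights both points equally.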
| 114
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
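# Example (a sketch): factorial(10) == 3628800; thanks to @lru_cache, repeated
# calls reuse previously computed results instead of recursing again.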
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
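# Round-trip sanity check (a sketch; assumes the two helpers above with BITS == 8,
# so quantizing to 8 bits and back reproduces the input up to 1/255):
#
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x, atol=1 / 255)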
def ddim_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__UpperCAmelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__UpperCAmelCase = self.alphas_cumprod[timestep]
__UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__UpperCAmelCase = self.bit_scale
if self.config.clip_sample:
__UpperCAmelCase = torch.clamp(__lowerCAmelCase , -scale , __lowerCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__UpperCAmelCase = self._get_variance(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__UpperCAmelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__UpperCAmelCase = model_output.device if torch.is_tensor(__lowerCAmelCase ) else '''cpu'''
__UpperCAmelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase ).to(__lowerCAmelCase )
__UpperCAmelCase = self._get_variance(__lowerCAmelCase , __lowerCAmelCase ) ** 0.5 * eta * noise
__UpperCAmelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
def ddpm_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type="epsilon", generator=None, return_dict: bool = True):
    t = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__UpperCAmelCase = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
__UpperCAmelCase = None
# 1. compute alphas, betas
__UpperCAmelCase = self.alphas_cumprod[t]
__UpperCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__UpperCAmelCase = 1 - alpha_prod_t
__UpperCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__UpperCAmelCase = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
__UpperCAmelCase = self.bit_scale
if self.config.clip_sample:
__UpperCAmelCase = torch.clamp(__lowerCAmelCase , -scale , __lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__UpperCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__UpperCAmelCase = 0
if t > 0:
__UpperCAmelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__lowerCAmelCase ).to(model_output.device )
__UpperCAmelCase = (self._get_variance(__lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise
__UpperCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        self.register_modules(unet=unet, scheduler=scheduler)
        # swap in the bit-aware step function for the chosen scheduler
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
@torch.no_grad()
    def __call__(self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        image = bits_to_decimal(latents)
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 350
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowercase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 86
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
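# Example (a sketch): a root of 10 with children 5 and -3 sums to 12.
#
#   tree = Node(10)
#   tree.left, tree.right = Node(5), Node(-3)
#   next(iter(BinaryTreeNodeSum(tree)))  # 12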
| 119
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 119
| 1
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"),
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
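# Illustrative note: via the `_LazyModule` above, `from transformers import EncodecModel`
# resolves lazily -- the torch-gated modeling file is only imported on first attribute access.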
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split a code snippet into tokens on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a (index, repo_name, path) key and group LSH near-duplicates into clusters."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    """Find duplicate clusters in a dataset of code snippets (a lower threshold yields more duplicates)."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Token-level Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
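

# Example (illustrative):
#     jaccard_similarity("a = 1\nb = 2", "a = 1\nb = 3")
# shares the tokens {a, 1, b} out of {a, 1, b, 2, 3}, so it returns 3 / 5 = 0.6.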
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicates from `dataset`, keeping one extreme per duplicate group."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # (x, y) pixel coordinates for every pixel of the image, in row-major order.
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1] and scale by the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # `shape` has no default, so it must be carried over explicitly
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
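

# Usage sketch (illustrative): 20 cameras panning around the origin at resolution 64,
# yielding rays of shape [1, 20 * 64 * 64, 2, 3] (an origin and a direction per pixel).
#     cameras = create_pan_cameras(64)
#     rays = cameras.camera_rays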
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
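

# Worked example: for the 7B model dim = 4096, so int(8 * 4096 / 3) = 10922, and
# rounding up to a multiple of 256 gives 11008 -- matching INTERMEDIATE_SIZE_MAP["7B"].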
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # Backtracking: once `level` reaches 0, `current_list` holds one full combination.
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
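

# Example (illustrative): generate_all_combinations(4, 2) returns
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]].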
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
_snake_case = 4
_snake_case = 2
_snake_case = generate_all_combinations(n, k)
print_all_state(total_list)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
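

# Illustrative: thanks to `attribute_map`, generic config names resolve to the GPT-J
# specific attributes, e.g. GPTJConfig().hidden_size returns n_embd (4096 by default).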
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
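

# Note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio
# is 5 * 2**6 = 320, i.e. one output frame per 320 input audio samples.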
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(decoded, expected)
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # 2-color each connected component, starting from any unvisited vertex.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge connects two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
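

# Example (illustrative): a triangle 0-1-2 cannot be 2-colored, so
# check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) returns False.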
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element of the array, i.e. the
    first element to its right that is strictly greater, by brute force in O(n^2).
    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Same brute-force idea, written with enumerate/slicing instead of index arithmetic.
    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Single right-to-left pass with a stack of candidate values: O(n).
    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
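

# Example (illustrative): next_greatest_element([2, 7, 3, 5, 1]) returns
# [7, -1, 5, -1, -1] -- each position maps to the next larger value to its right.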
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    # Rename the patch-embedding weights of stage `idx`.
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    # Rename the attention/MLP/norm weights of block `cnt` in stage `idx`.
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    # Rename the classification token of the final stage.
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    # Rename the final layernorm and classification head weights.
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """
    Convert a Microsoft CvT checkpoint into the HuggingFace format.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_84,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCAmelCase = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
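# Illustrative aside (not part of the original script): the whole conversion
# reduces to a list of (huggingface_name, original_name) pairs plus a copy
# loop. A minimal, self-contained sketch of that remapping pattern; the helper
# name is ours, and it reuses the module's existing OrderedDict import:
def _remap_state_dict_sketch(original_weights, rename_pairs):
    """Return a new OrderedDict holding the original tensors under the HF names."""
    remapped = OrderedDict()
    for hf_name, orig_name in rename_pairs:
        remapped[hf_name] = original_weights[orig_name]
    return remapped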
| 28
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
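# Usage sketch (ours, not part of the original file): a PretrainedConfig
# subclass like this round-trips through JSON via the standard
# save_pretrained/from_pretrained API. The directory name is hypothetical.
def _mra_config_roundtrip_sketch(tmp_dir="./mra-demo"):
    config = MraConfig(num_hidden_layers=6, block_per_row=2)
    config.save_pretrained(tmp_dir)  # writes {tmp_dir}/config.json
    reloaded = MraConfig.from_pretrained(tmp_dir)
    assert reloaded.num_hidden_layers == 6
    return reloaded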
| 28
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
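# Illustrative aside (ours, not part of the original script): the if/elif
# dispatch above can equivalently be table-driven, which makes supporting a
# new head a one-line change. A minimal sketch reusing the converters defined
# in this file:
_SUFFIX_TO_CONVERTER = {
    "ForSequenceClassification": convert_classification,
    "ForAudioFrameClassification": convert_diarization,
    "ForXVector": convert_xvector,
}


def _dispatch_converter_sketch(arch, base_model_name, hf_config, downstream_dict):
    for suffix, converter in _SUFFIX_TO_CONVERTER.items():
        if arch.endswith(suffix):
            return converter(base_model_name, hf_config, downstream_dict)
    raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")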
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
lowercase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 45
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
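# Standalone usage sketch (ours, not part of the test file): scoring one short,
# made-up protein sequence with the same public ESM-2 checkpoint the slow
# tests use.
def _esm_mlm_inference_sketch():
    from transformers import AutoTokenizer, TFEsmForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
    inputs = tokenizer("MKTAYIAKQR", return_tensors="tf")
    logits = model(**inputs).logits  # shape: (1, sequence_length, vocab_size=33)
    return logits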
| 45
| 1
|
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download directory is our assumption; the call site
    # below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCAmelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
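# Illustrative aside (ours): _download keys its integrity check on the fact
# that each OpenAI checkpoint URL embeds the file's SHA256 as the
# second-to-last path segment. The verify-then-trust core of it, as a
# standalone helper:
def _sha256_matches_sketch(path, expected_sha256):
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest() == expected_sha256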
| 260
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a writer batch size (i.e. Parquet row group size) based on the feature types.

    Media and binary features get smaller row groups so that random access stays cheap.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle. The caller is responsible for opening and closing the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
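# Usage sketch (ours, not part of the original module): writing a tiny
# in-memory dataset with ParquetDatasetWriter and reading the file back with
# pyarrow to check the round trip. The file name is hypothetical.
def _parquet_roundtrip_sketch(path="demo.parquet"):
    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ParquetDatasetWriter(ds, path).write()
    return pq.read_table(path).to_pydict()  # {'text': ['a', 'b'], 'label': [0, 1]}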
| 260
| 1
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
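# Illustrative aside (ours): the file above is the standard deprecation-shim
# pattern -- keep the old name importable, warn once at construction, inherit
# everything else from the replacement. The same idea as a generic factory:
def _deprecated_alias_sketch(new_cls, old_name):
    class _Alias(new_cls):
        def __init__(self, *args, **kwargs):
            warnings.warn(
                f"{old_name} is deprecated; use {new_cls.__name__} instead.", FutureWarning
            )
            super().__init__(*args, **kwargs)

    _Alias.__name__ = old_name
    return _Alias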
| 338
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
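# Standalone usage sketch (ours, not part of the test file): one seeded sample
# from the same public checkpoint the slow test uses.
def _karras_ve_sample_sketch():
    pipe = KarrasVePipeline(
        unet=UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256"),
        scheduler=KarrasVeScheduler(),
    )
    generator = torch.manual_seed(0)
    image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images[0]
    return image  # numpy array of shape (256, 256, 3)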
| 338
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that will dynamically pad the inputs received and prepare masked indices for self-supervised
    pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer for Wav2Vec2-like pretraining, so the gumbel softmax temperature can be decayed per step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
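# Illustrative aside (ours): the trainer above decays the Gumbel-softmax
# temperature geometrically per update step and clamps it at a floor. With the
# script's default arguments (max 2.0, min 0.5, decay 0.999995):
def _gumbel_temperature_sketch(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)  # e.g. step 0 -> 2.0, bottoms out at 0.5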
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 358
|
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {str(digit): digit**5 for digit in range(10)}
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_UpperCamelCase ) )
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(_UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
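# Sanity check (ours): 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is one of the
# numbers solution() sums over.
def _check_known_member():
    assert digits_fifth_powers_sum(4150) == 4150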
| 149
| 0
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
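# Usage sketch (ours): require_version raises when the installed package
# violates the pinned range from dependency_versions_table.
def _dep_check_usage_sketch():
    dep_version_check("tqdm")  # raises if the installed tqdm violates the pin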
| 213
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}'):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 213
| 1
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __snake_case :
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def _UpperCAmelCase (UpperCamelCase_ : TreeNode | None ):
'''simple docstring'''
# Validation
def is_valid_tree(UpperCamelCase_ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(UpperCamelCase_ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
UpperCamelCase_ : TreeNode | None , UpperCamelCase_ : float , UpperCamelCase_ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , UpperCamelCase_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , UpperCamelCase_ )
)
return is_binary_search_tree_recursive_check(UpperCamelCase_ , -float("""inf""" ) , float("""inf""" ) )
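# A minimal, self-contained sketch of the bound-propagation check above, using
# readable names (`_Node`, `_check`) that are assumptions, not the obfuscated
# names in this file; it exercises one valid and one invalid tree.
def _example_bst_check() -> None:
    @dataclass
    class _Node:
        data: float
        left: _Node | None = None
        right: _Node | None = None
    def _check(node: _Node | None, lo: float, hi: float) -> bool:
        if node is None:
            return True
        return (
            lo < node.data < hi
            and _check(node.left, lo, node.data)
            and _check(node.right, node.data, hi)
        )
    good = _Node(2.0, _Node(1.0), _Node(3.0))
    bad = _Node(2.0, _Node(3.0), _Node(1.0))  # children on the wrong sides
    assert _check(good, -float("inf"), float("inf"))
    assert not _check(bad, -float("inf"), float("inf"))
_example_bst_check()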
if __name__ == "__main__":
import doctest
doctest.testmod()
| 159
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
_lowerCamelCase : Dict = namedtuple("covid_data", "cases deaths recovered")
def _UpperCAmelCase (UpperCamelCase_ : str = "https://www.worldometers.info/coronavirus/" ):
'''simple docstring'''
_lowerCAmelCase : Dict = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(UpperCamelCase_ ).content ).xpath(UpperCamelCase_ ) )
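# A minimal sketch that exercises the XPath above against a tiny inline HTML
# sample, so the extraction logic can be checked without hitting the live
# site; the sample markup is an assumption about the page structure.
def _example_parse() -> None:
    sample = (
        '<div><div class="maincounter-number"><span>1</span></div>'
        '<div class="maincounter-number"><span>2</span></div>'
        '<div class="maincounter-number"><span>3</span></div></div>'
    )
    values = html.fromstring(sample).xpath('//div[@class = "maincounter-number"]/span/text()')
    assert values == ["1", "2", "3"]
_example_parse()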
_lowerCamelCase : Tuple = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 159
| 1
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[str] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa)
a__: Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa)
a__: Any = controlnet_params
a__: Dict = """bird"""
a__: List[Any] = jax.device_count()
a__: Tuple = pipe.prepare_text_inputs([prompts] * num_samples)
a__: Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
a__: List[str] = pipe.prepare_image_inputs([canny_image] * num_samples)
a__: Optional[int] = jax.random.PRNGKey(0)
a__: Optional[Any] = jax.random.split(lowerCamelCase__ , jax.device_count())
a__: Dict = replicate(lowerCamelCase__)
a__: List[Any] = shard(lowerCamelCase__)
a__: Dict = shard(lowerCamelCase__)
a__: int = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=50 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__: Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a__: Union[str, Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
a__: Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten()))
a__: int = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
print(f'output_slice: {output_slice}')
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[int] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa)
a__: Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa)
a__: str = controlnet_params
a__: List[Any] = """Chef in the kitchen"""
a__: int = jax.device_count()
a__: str = pipe.prepare_text_inputs([prompts] * num_samples)
a__: Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
a__: Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples)
a__: int = jax.random.PRNGKey(0)
a__: Tuple = jax.random.split(lowerCamelCase__ , jax.device_count())
a__: Tuple = replicate(lowerCamelCase__)
a__: Optional[Any] = shard(lowerCamelCase__)
a__: int = shard(lowerCamelCase__)
a__: Union[str, Any] = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=50 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__: Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a__: str = images[0, 2_53:2_56, 2_53:2_56, -1]
a__: Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten()))
a__: Any = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
print(f'output_slice: {output_slice}')
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 290
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class SCREAMING_SNAKE_CASE__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 1.0, lowerCamelCase__ = None, ):
super().__init__()
A : Union[str, Any] = initial_learning_rate
A : List[Any] = warmup_steps
A : int = power
A : Optional[int] = decay_schedule_fn
A : int = name
def __call__( self, lowerCamelCase__ ):
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
A : str = tf.cast(lowerCamelCase__, tf.floataa )
A : List[Any] = tf.cast(self.warmup_steps, tf.floataa )
A : Dict = global_step_float / warmup_steps_float
A : Union[str, Any] = self.initial_learning_rate * tf.math.pow(lowerCamelCase__, self.power )
return tf.cond(
global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps ), name=lowerCamelCase__, )
def _lowerCAmelCase ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
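# A pure-Python sketch of the warmup rule implemented in `__call__` above:
# while global_step < warmup_steps the learning rate is
# init_lr * (global_step / warmup_steps) ** power. The name below is
# illustrative; no TensorFlow is needed to follow the arithmetic.
def _example_warmup_lr(step, init_lr=1e-3, warmup_steps=100, power=1.0):
    if step < warmup_steps:
        return init_lr * (step / warmup_steps) ** power
    return init_lr  # past warmup, the decay schedule takes over
assert _example_warmup_lr(0) == 0.0
assert abs(_example_warmup_lr(50) - 5e-4) < 1e-12
assert _example_warmup_lr(100) == 1e-3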
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 0.9 , _lowerCAmelCase = 0.999 , _lowerCAmelCase = 1e-8 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = None , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCAmelCase , )
if num_warmup_steps:
A : Dict = WarmUp(
initial_learning_rate=_lowerCAmelCase , decay_schedule_fn=_lowerCAmelCase , warmup_steps=_lowerCAmelCase , )
if weight_decay_rate > 0.0:
A : str = AdamWeightDecay(
learning_rate=_lowerCAmelCase , weight_decay_rate=_lowerCAmelCase , beta_a=_lowerCAmelCase , beta_a=_lowerCAmelCase , epsilon=_lowerCAmelCase , clipnorm=_lowerCAmelCase , global_clipnorm=_lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=_lowerCAmelCase , )
else:
A : Optional[int] = tf.keras.optimizers.Adam(
learning_rate=_lowerCAmelCase , beta_a=_lowerCAmelCase , beta_a=_lowerCAmelCase , epsilon=_lowerCAmelCase , clipnorm=_lowerCAmelCase , global_clipnorm=_lowerCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ = 0.001, lowerCamelCase__ = 0.9, lowerCamelCase__ = 0.999, lowerCamelCase__ = 1e-7, lowerCamelCase__ = False, lowerCamelCase__ = 0.0, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "AdamWeightDecay", **lowerCamelCase__, ):
super().__init__(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
A : int = weight_decay_rate
A : Any = include_in_weight_decay
A : Dict = exclude_from_weight_decay
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
A : Tuple = {"""WarmUp""": WarmUp}
return super(lowerCamelCase__, cls ).from_config(lowerCamelCase__, custom_objects=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
super(lowerCamelCase__, self )._prepare_local(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
A : List[str] = tf.constant(
self.weight_decay_rate, name="""adam_weight_decay_rate""" )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""], use_locking=self._use_locking, )
return tf.no_op()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None, **lowerCamelCase__ ):
A , A : Dict = list(zip(*lowerCamelCase__ ) )
return super(lowerCamelCase__, self ).apply_gradients(zip(lowerCamelCase__, lowerCamelCase__ ), name=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A : Union[str, Any] = apply_state or {}
A : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A : Dict = self._fallback_apply_state(lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ):
A , A : str = self._get_lr(var.device, var.dtype.base_dtype, lowerCamelCase__ )
A : Any = self._decay_weights_op(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__, self )._resource_apply_dense(lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ):
A , A : Tuple = self._get_lr(var.device, var.dtype.base_dtype, lowerCamelCase__ )
A : Optional[Any] = self._decay_weights_op(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__, self )._resource_apply_sparse(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase__, lowerCamelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase__, lowerCamelCase__ ) is not None:
return False
return True
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self ):
A : List[str] = []
A : List[str] = None
@property
def _lowerCAmelCase ( self ):
if self._accum_steps is None:
A : str = tf.Variable(
tf.constant(0, dtype=tf.intaa ), trainable=lowerCamelCase__, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
return self._accum_steps.value()
@property
def _lowerCAmelCase ( self ):
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self, lowerCamelCase__ ):
if not self._gradients:
A : int = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase__ ), trainable=lowerCamelCase__, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase__ ) != len(self._gradients ):
raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}''' )
for accum_gradient, gradient in zip(self._gradients, lowerCamelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase__ )
self._accum_steps.assign_add(1 )
def _lowerCAmelCase ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
| 116
| 0
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def A(__a: str , __a: Union[str, Any] , __a: Union[str, Any] , __a: Any=5 ):
assert masked_input.count("<mask>" ) == 1
lowerCAmelCase_ = torch.tensor(tokenizer.encode(A__ , add_special_tokens=A__ ) ).unsqueeze(0 ) # Batch size 1
lowerCAmelCase_ = model(A__ )[0] # The last hidden-state is the first element of the output tuple
lowerCAmelCase_ = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
lowerCAmelCase_ = logits[0, masked_index, :]
lowerCAmelCase_ = logits.softmax(dim=0 )
lowerCAmelCase_ = prob.topk(k=A__ , dim=0 )
lowerCAmelCase_ = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(A__ ) )] )
lowerCAmelCase_ = tokenizer.mask_token
lowerCAmelCase_ = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
lowerCAmelCase_ = predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(A__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(A__ ) , A__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(A__ , A__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowerCamelCase__ = CamembertTokenizer.from_pretrained('''camembert-base''')
lowerCamelCase__ = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowerCamelCase__ = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 350
|
def A(__a: Tuple ):
lowerCAmelCase_ = len(__a )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase_ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )]
# Reverse whole list
lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )]
cur -= 1
return arr
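# A readable sketch of the flip-and-shrink loop above (the names are
# assumptions, not the obfuscated originals): bring the maximum of the
# unsorted prefix to the front, then flip the first `cur` items so it lands
# in its final slot.
def _example_pancake_sort(arr):
    out = list(arr)
    for cur in range(len(out), 1, -1):
        mi = out.index(max(out[0:cur]))       # position of the current maximum
        out = out[mi::-1] + out[mi + 1 :]     # flip the prefix ending at mi
        out = out[cur - 1 :: -1] + out[cur:]  # flip the first `cur` items
    return out
assert _example_pancake_sort([3, 1, 2]) == [1, 2, 3]
assert _example_pancake_sort([]) == []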
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 22
| 0
|
import requests
def A ( lowercase , lowercase ) -> None:
'''simple docstring'''
UpperCamelCase = {'Content-Type': 'application/json'}
UpperCamelCase = requests.post(lowercase , json={'text': message_body} , headers=lowercase )
if response.status_code != 200:
UpperCamelCase = (
            'Request to Slack returned an error '
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(lowercase )
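# For reference, the webhook body posted above is minimal JSON with a single
# "text" field, e.g. {"text": "deploy finished"}, sent with
# Content-Type: application/json to the channel's incoming-webhook URL.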
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 222
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowercase ( unittest.TestCase ):
__lowercase : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = VideoClassificationPipeline(model=A_ , image_processor=A_ , top_k=2 )
UpperCamelCase = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
for example in examples:
UpperCamelCase = video_classifier(A_ )
self.assertEqual(
A_ , [
{'score': ANY(A_ ), 'label': ANY(A_ )},
{'score': ANY(A_ ), 'label': ANY(A_ )},
] , )
@require_torch
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCamelCase = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
UpperCamelCase = pipeline(
'video-classification' , model=A_ , feature_extractor=A_ , frame_sampling_rate=4 )
UpperCamelCase = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = video_classifier(A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
| 222
| 1
|
from __future__ import annotations
def __UpperCamelCase ( lowercase__ : list[int] , lowercase__ : int ) -> bool:
'''simple docstring'''
if len(lowercase__ ) == 0:
return False
lowerCAmelCase_ : Optional[Any] = len(lowercase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowercase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowercase__ )
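# The recursion above copies the list on every slice (O(n) work per level).
# An index-based sketch (assumed names, not part of the original) returns the
# same answers without copying:
def _example_binary_search(a_list, item):
    lo, hi = 0, len(a_list) - 1
    while lo <= hi:
        midpoint = (lo + hi) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            hi = midpoint - 1  # continue in the left half
        else:
            lo = midpoint + 1  # continue in the right half
    return False
assert _example_binary_search([1, 3, 5, 7], 5) is True
assert _example_binary_search([1, 3, 5, 7], 4) is False
assert _example_binary_search([], 1) is False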
if __name__ == "__main__":
__UpperCAmelCase = input('Enter numbers separated by comma:\n').strip()
__UpperCAmelCase = [int(item.strip()) for item in user_input.split(',')]
__UpperCAmelCase = int(input('Enter the number to be found in the list:\n').strip())
__UpperCAmelCase = '' if binary_search(sequence, target) else 'not '
print(f"""{target} was {not_str}found in {sequence}""")
| 28
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> List[str]:
'''simple docstring'''
hf_model.apply_weight_norm()
lowerCAmelCase_ : Dict = checkpoint["""input_conv.weight_g"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.weight_v"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase_ : Tuple = checkpoint[f'upsamples.{i}.1.weight_g']
lowerCAmelCase_ : Any = checkpoint[f'upsamples.{i}.1.weight_v']
lowerCAmelCase_ : int = checkpoint[f'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
lowerCAmelCase_ : Tuple = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
lowerCAmelCase_ : Optional[Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint["""output_conv.1.weight_g"""]
lowerCAmelCase_ : Dict = checkpoint["""output_conv.1.weight_v"""]
lowerCAmelCase_ : Optional[int] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : List[Any]=None , lowercase__ : Union[str, Any]=None , ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(lowercase__ )
else:
lowerCAmelCase_ : Any = SpeechTaHifiGanConfig()
lowerCAmelCase_ : str = SpeechTaHifiGan(lowercase__ )
lowerCAmelCase_ : Tuple = torch.load(lowercase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase__ , lowercase__ )
lowerCAmelCase_ : Optional[int] = np.load(lowercase__ )
lowerCAmelCase_ : Any = stats[0].reshape(-1 )
lowerCAmelCase_ : List[str] = stats[1].reshape(-1 )
lowerCAmelCase_ : Optional[int] = torch.from_numpy(lowercase__ ).float()
lowerCAmelCase_ : Any = torch.from_numpy(lowercase__ ).float()
model.save_pretrained(lowercase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , A=10_00 , ) -> Any:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
lowerCamelCase = range_bbox
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase = bbox[i, j, 3]
lowerCamelCase = bbox[i, j, 1]
lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase = bbox[i, j, 2]
lowerCamelCase = bbox[i, j, 0]
lowerCamelCase = t
lowerCamelCase = tf.convert_to_tensor(_UpperCamelCase )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self , A , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMModel(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , token_type_ids=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A , A ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForMaskedLM(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A , A ) -> int:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFLayoutLMForSequenceClassification(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFLayoutLMForTokenClassification(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForQuestionAnswering(config=_UpperCamelCase )
lowerCamelCase = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
        (
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
        ) = config_and_inputs
lowerCamelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __lowercase ( _snake_case , _snake_case , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCamelCase : List[Any] = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : Optional[int] = False
UpperCamelCase : Dict = True
UpperCamelCase : Dict = 1_0
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFLayoutLMModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
def __lowerCamelCase ( ):
    '''simple docstring'''
    # fmt: off
lowerCamelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowerCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCamelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCamelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
# test the sequence output on [0, :3, :3]
lowerCamelCase = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1e-3 ) )
# test the pooled output on [1, :3]
lowerCamelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _UpperCamelCase , atol=1e-3 ) )
@slow
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(
input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCamelCase = outputs.loss
lowerCamelCase = (2,)
self.assertEqual(loss.shape , _UpperCamelCase )
# test the shape of the logits
lowerCamelCase = outputs.logits
lowerCamelCase = (2, 2)
self.assertEqual(logits.shape , _UpperCamelCase )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13 )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(
input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
# test the shape of the logits
lowerCamelCase = outputs.logits
lowerCamelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _UpperCamelCase )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" )
lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCamelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
# test the shape of the logits
lowerCamelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _UpperCamelCase )
self.assertEqual(outputs.end_logits.shape , _UpperCamelCase )
| 252
|
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
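# Worked example of the zfill alignment above: for 25 (11001) and 37 (100101)
# both operands are padded to six bits (011001 vs 100101), then ANDed column
# by column to give 000001. A readable restatement (the name is an assumption,
# since the signature above is obfuscated):
def _example_binary_and(a, b):
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int(x == "1" and y == "1"))
        for x, y in zip(a_bin.zfill(width), b_bin.zfill(width))
    )
assert _example_binary_and(25, 37) == "0b000001"  # 25 & 37 == 1
assert _example_binary_and(25, 32) == "0b000000"  # disjoint bit patterns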
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : str =LongformerTokenizer
lowercase_ : Dict =True
lowercase_ : Dict =LongformerTokenizerFast
lowercase_ : str =True
def A__ ( self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase = dict(zip(A__ ,range(len(A__))))
lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase = {'''unk_token''': '''<unk>'''}
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write(json.dumps(A__) + '''\n''')
with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write('''\n'''.join(A__))
def A__ ( self ,**A__):
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self ,**A__):
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self ,A__):
lowercase = '''lower newer'''
lowercase = '''lower newer'''
return input_text, output_text
def A__ ( self):
lowercase = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map)
lowercase = '''lower newer'''
lowercase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase = tokenizer.tokenize(A__) # , add_prefix_space=True)
self.assertListEqual(A__ ,A__)
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,A__)
def A__ ( self):
lowercase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' ,add_special_tokens=A__) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2])
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' ,add_special_tokens=A__) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] ,)
@slow
def A__ ( self):
lowercase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''')
lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=A__)
lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=A__)
lowercase = tokenizer.encode(
'''sequence builders''' ,add_special_tokens=A__ ,add_prefix_space=A__)
lowercase = tokenizer.encode(
'''sequence builders''' ,'''multi-sequence build''' ,add_special_tokens=A__ ,add_prefix_space=A__)
lowercase = tokenizer.build_inputs_with_special_tokens(A__)
lowercase = tokenizer.build_inputs_with_special_tokens(A__ ,A__)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A__ ( self):
lowercase = self.get_tokenizer()
lowercase = '''Encode this sequence.'''
lowercase = tokenizer.byte_encoder[''' '''.encode('''utf-8''')[0]]
# Testing encoder arguments
lowercase = tokenizer.encode(A__ ,add_special_tokens=A__ ,add_prefix_space=A__)
lowercase = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(A__ ,A__)
lowercase = tokenizer.encode(A__ ,add_special_tokens=A__ ,add_prefix_space=A__)
lowercase = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(A__ ,A__)
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''})
lowercase = tokenizer.encode(A__ ,add_special_tokens=A__)
lowercase = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(A__ ,A__)
# Testing spaces after special tokens
lowercase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(A__ ,lstrip=A__ ,rstrip=A__)}) # mask token has a left space
lowercase = tokenizer.convert_tokens_to_ids(A__)
lowercase = '''Encode <mask> sequence'''
lowercase = '''Encode <mask>sequence'''
lowercase = tokenizer.encode(A__)
lowercase = encoded.index(A__)
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(A__ ,A__)
lowercase = tokenizer.encode(A__)
lowercase = encoded.index(A__)
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(A__ ,A__)
def A__ ( self):
pass
def A__ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = self.rust_tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = self.tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = '''A, <mask> AllenNLP sentence.'''
lowercase = tokenizer_r.encode_plus(A__ ,add_special_tokens=A__ ,return_token_type_ids=A__)
lowercase = tokenizer_p.encode_plus(A__ ,add_special_tokens=A__ ,return_token_type_ids=A__)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids''']) ,sum(tokens_p['''token_type_ids''']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) ,sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) ,)
lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
A__ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
A__ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
def A__ ( self):
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2):
lowercase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] ,A__)
self.assertEqual(post_processor_state['''add_prefix_space'''] ,A__)
self.assertEqual(post_processor_state['''trim_offsets'''] ,A__)
def A__ ( self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase = f'{text_of_1_token} {text_of_1_token}'
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(A__) + 1, len(A__) + 1 + len(A__)) ,)
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(A__) + 1, len(A__) + 1 + len(A__)) ,)
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(A__), len(A__) + 1 + len(A__)) ,)
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(A__), len(A__) + 1 + len(A__)) ,)
lowercase = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(A__) + 1, 1 + len(A__) + 1 + len(A__)) ,)
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(A__), 1 + len(A__) + 1 + len(A__)) ,)
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,add_prefix_space=A__ ,trim_offsets=A__)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(A__), 1 + len(A__) + 1 + len(A__)) ,)
| 355
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
lowercase = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')
lowercase = {
'''input_ids''': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] ,dtype=tf.intaa), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa),
}
lowercase = model(A__)['''last_hidden_state''']
lowercase = tf.TensorShape((1, 6, 7_6_8))
self.assertEqual(output.shape ,A__)
# compare the actual values for a slice.
lowercase = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4))
| 97
| 0
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCamelCase : Any = logging.getLogger(__name__)
def __lowerCamelCase ( A__ , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = False , ) -> int:
"""simple docstring"""
UpperCamelCase = bnb_quantization_config.load_in_abit
UpperCamelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
UpperCamelCase = []
# custom device map
if isinstance(A__ , A__ ) and len(device_map.keys() ) > 1:
UpperCamelCase = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase = get_keys_to_not_convert(A__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A__ )
UpperCamelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase = []
UpperCamelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A__ )
# compatibility with peft
UpperCamelCase = load_in_abit
UpperCamelCase = load_in_abit
UpperCamelCase = get_parameter_device(A__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
UpperCamelCase = replace_with_bnb_layers(A__ , A__ , modules_to_not_convert=A__ )
# convert param to the right dtype
UpperCamelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase = name.replace('.weight' , '' ).replace('.bias' , '' )
UpperCamelCase = getattr(A__ , A__ , A__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A__ ):
param.to(A__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            ' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
UpperCamelCase = replace_with_bnb_layers(
A__ , A__ , modules_to_not_convert=A__ )
UpperCamelCase = get_quantized_model_device_map(
A__ , A__ , A__ , max_memory=A__ , no_split_module_classes=A__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase = True
UpperCamelCase = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
A__ , A__ , A__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=A__ , offload_state_dict=A__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A__ , device_map=A__ , offload_dir=A__ )
def __lowerCamelCase ( A__ , A__ , A__=None , A__=None , A__=None ) -> List[Any]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(A__ , A__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
UpperCamelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase = {}
UpperCamelCase = special_dtypes
UpperCamelCase = no_split_module_classes
UpperCamelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase = get_balanced_memory(
A__ , low_zero=(device_map == 'balanced_low_0') , max_memory=A__ , **A__ , )
UpperCamelCase = max_memory
UpperCamelCase = infer_auto_device_map(A__ , **A__ )
if isinstance(A__ , A__ ):
# check if don't have any quantized module on the cpu
UpperCamelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit.' )
del device_map_without_some_modules
return device_map
def __lowerCamelCase ( A__ , A__ , A__=None , A__=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase = []
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
A__ , A__ , A__ , A__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on GitHub if you think this is'
' a bug.' )
return model
def __lowerCamelCase ( A__ , A__ , A__=None , A__=None , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(A__ )
if isinstance(A__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase = '.'.join(A__ )
UpperCamelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase = False
break
if proceed:
# Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit cannot both be False' )
UpperCamelCase = module.weight.data
if module.bias is not None:
UpperCamelCase = module.bias.data
bnb_module.requires_grad_(A__ )
setattr(A__ , A__ , A__ )
UpperCamelCase = True
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
A__ , A__ , A__ , A__ )
UpperCamelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
# Create a copy of the model
with init_empty_weights():
UpperCamelCase = deepcopy(A__ ) # this has zero cost since it is done inside the `init_empty_weights` context manager
UpperCamelCase = find_tied_parameters(A__ )
# For compatibility with Accelerate < 0.18
if isinstance(A__ , A__ ):
UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(A__ , [] )
UpperCamelCase = len(A__ ) > 0
# Check if it is a base model
UpperCamelCase = False
if hasattr(A__ , 'base_model_prefix' ):
UpperCamelCase = not hasattr(A__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(A__ ) - set(A__ )
UpperCamelCase = list(set(A__ ) ) + list(A__ )
# remove ".weight" from the keys
UpperCamelCase = ['.weight', '.bias']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(A__ , '' )
filtered_module_names.append(A__ )
return filtered_module_names
def __lowerCamelCase ( A__ ) -> Dict:
"""simple docstring"""
for m in model.modules():
if isinstance(A__ , bnb.nn.Linearabit ):
return True
return False
def __lowerCamelCase ( A__ ) -> List[Any]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
"""simple docstring"""
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A__ , A__ , 0 , dtype=A__ , value=A__ )
UpperCamelCase = param_name
UpperCamelCase = model
if "." in tensor_name:
UpperCamelCase = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase = getattr(A__ , A__ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
# offload weights
UpperCamelCase = False
offload_weight(module._parameters[tensor_name] , A__ , A__ , index=A__ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , A__ , index=A__ , )
else:
offload_weight(A__ , A__ , A__ , index=A__ )
offload_weight(A__ , param_name.replace('weight' , 'SCB' ) , A__ , index=A__ )
set_module_tensor_to_device(A__ , A__ , 'meta' , dtype=A__ , value=torch.empty(*param.size() ) )
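# A minimal, self-contained sketch of the recursive nn.Linear replacement
# pattern implemented by `_replace_with_bnb_layers` above. `QuantLinear` is a
# hypothetical stand-in for the real bitsandbytes layers
# (bnb.nn.Linear8bitLt / bnb.nn.Linear4bit); only torch is assumed.
import torch
import torch.nn as nn


class QuantLinear(nn.Linear):
    """Placeholder for a quantized linear layer."""


def replace_linear(model: nn.Module, skip: set, prefix: str = "") -> nn.Module:
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new_layer = QuantLinear(child.in_features, child.out_features, bias=child.bias is not None)
            new_layer.weight.data = child.weight.data
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            new_layer.requires_grad_(False)  # quantized weights stay frozen
            setattr(model, name, new_layer)
        else:
            replace_linear(child, skip, full_name)
    return model


if __name__ == "__main__":
    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    replace_linear(model, skip=set())
    print(model)  # both Linear layers are now QuantLinear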
| 28
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 28
| 1
|
from __future__ import annotations
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : str ): # noqa: E741
while r - l > 1:
a__: Any =(l + r) // 2
if v[m] >= key:
a__: Any =m
else:
a__: Optional[int] =m # noqa: E741
return r
def __lowerCamelCase ( __magic_name__ : list[int] ):
if len(__magic_name__ ) == 0:
return 0
a__: Tuple =[0] * len(__magic_name__ )
a__: Optional[int] =1
a__: Optional[Any] =v[0]
for i in range(1 , len(__magic_name__ ) ):
if v[i] < tail[0]:
a__: Union[str, Any] =v[i]
elif v[i] > tail[length - 1]:
a__: Optional[int] =v[i]
length += 1
else:
a__: Dict =v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
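# A readable, runnable equivalent of the snippet above, since the renaming
# there collapses every assignment target into `a__`: O(n log n) length of
# the longest strictly increasing subsequence via binary search on tails.
from bisect import bisect_left


def longest_increasing_subsequence_length(v: list) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    tails = []  # tails[i] = smallest tail of an increasing subsequence of length i + 1
    for x in v:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)  # x extends the longest subsequence found so far
        else:
            tails[i] = x  # x is a smaller tail for subsequences of length i + 1
    return len(tails)


if __name__ == "__main__":
    import doctest

    doctest.testmod()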
| 42
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = 42
class lowerCamelCase__ ( _a ):
def __init__( self : Optional[Any] , _a : PriorTransformer , _a : CLIPVisionModel , _a : CLIPImageProcessor , _a : HeunDiscreteScheduler , _a : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def _lowerCamelCase ( self : Tuple , _a : Tuple , _a : Tuple , _a : Any , _a : Any , _a : List[str] , _a : Any ):
if latents is None:
a__: Any =randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
a__: List[str] =latents.to(_a )
a__: int =latents * scheduler.init_noise_sigma
return latents
def _lowerCamelCase ( self : Dict , _a : str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
a__: List[Any] =torch.device(F"cuda:{gpu_id}" )
a__: List[Any] =[self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _lowerCamelCase ( self : Any , _a : List[str] , _a : Optional[int] , _a : Union[str, Any] , _a : List[str] , ):
if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ):
a__: Optional[int] =torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 )
if not isinstance(_a , torch.Tensor ):
a__: Optional[Any] =self.image_processor(_a , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
a__: int =image.to(dtype=self.image_encoder.dtype , device=_a )
a__: str =self.image_encoder(_a )["last_hidden_state"]
a__: Tuple =image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
a__: Optional[Any] =image_embeds.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
a__: Union[str, Any] =torch.zeros_like(_a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
a__: int =torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self : List[Any] , _a : Union[PIL.Image.Image, List[PIL.Image.Image]] , _a : int = 1 , _a : int = 2_5 , _a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a : Optional[torch.FloatTensor] = None , _a : float = 4.0 , _a : int = 6_4 , _a : Optional[str] = "pil" , _a : bool = True , ):
if isinstance(_a , PIL.Image.Image ):
a__: List[str] =1
elif isinstance(_a , torch.Tensor ):
a__: List[Any] =image.shape[0]
elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
a__: int =len(_a )
else:
raise ValueError(
F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}" )
a__: Optional[int] =self._execution_device
a__: Optional[int] =batch_size * num_images_per_prompt
a__: Any =guidance_scale > 1.0
a__: int =self._encode_image(_a , _a , _a , _a )
# prior
self.scheduler.set_timesteps(_a , device=_a )
a__: int =self.scheduler.timesteps
a__: str =self.prior.config.num_embeddings
a__: Dict =self.prior.config.embedding_dim
a__: Dict =self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
a__: int =latents.reshape(latents.shape[0] , _a , _a )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
a__: Union[str, Any] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__: Tuple =self.scheduler.scale_model_input(_a , _a )
a__: Tuple =self.prior(
_a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding
# remove the variance
a__ , a__: List[str] =noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
a__ , a__: Union[str, Any] =noise_pred.chunk(2 )
a__: Union[str, Any] =noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
a__: Any =self.scheduler.step(
_a , timestep=_a , sample=_a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_a )
a__: List[str] =[]
for i, latent in enumerate(_a ):
a__: Any =self.renderer.decode(
latent[None, :] , _a , size=_a , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_a )
a__: Tuple =torch.stack(_a )
if output_type not in ["np", "pil"]:
raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}" )
a__: Dict =images.cpu().numpy()
if output_type == "pil":
a__: Optional[int] =[self.numpy_to_pil(_a ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_a )
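# The classifier-free guidance update inside the denoising loop above reduces
# to one line; a minimal tensor-level sketch, independent of the pipeline
# (shapes here are illustrative):
import torch

guidance_scale = 3.0
noise_pred = torch.randn(2, 16, 32)  # batched [unconditional, conditional] predictions

noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 16, 32])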
| 42
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict ):
'''simple docstring'''
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : int ):
'''simple docstring'''
lowercase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowercase = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
lowercase = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
lowercase = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
lowercase = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
lowercase = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
lowercase = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
lowercase = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
lowercase = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
lowercase = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
lowercase = key.replace('image_encoder.module' , 'flava.image_model' )
lowercase = key.replace('text_encoder.module' , 'flava.text_model' )
lowercase = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
lowercase = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
lowercase = key.replace('text_projection' , 'flava.text_projection' )
lowercase = key.replace('image_projection' , 'flava.image_projection' )
lowercase = value.float()
for key, value in codebook_state_dict.items():
lowercase = value
return upgrade
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : List[Any]=None ):
'''simple docstring'''
if config_path is not None:
lowercase = FlavaConfig.from_pretrained(__snake_case )
else:
lowercase = FlavaConfig()
lowercase = FlavaForPreTraining(__snake_case ).eval()
lowercase = convert_dalle_checkpoint(__snake_case , __snake_case , save_checkpoint=__snake_case )
if os.path.exists(__snake_case ):
lowercase = torch.load(__snake_case , map_location='cpu' )
else:
lowercase = torch.hub.load_state_dict_from_url(__snake_case , map_location='cpu' )
lowercase = upgrade_state_dict(__snake_case , __snake_case )
hf_model.load_state_dict(__snake_case )
lowercase = hf_model.state_dict()
lowercase = count_parameters(__snake_case )
lowercase = count_parameters(__snake_case ) + count_parameters(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1e-3 )
hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
_UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
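# The conversion above is essentially chained key renames over a state dict;
# a generic sketch of that pattern. The two renames kept here appear in the
# script above; the rest of the real FLAVA mapping is omitted.
from collections import OrderedDict

import torch

RENAMES = [
    ("text_encoder.module", "flava.text_model"),
    ("image_encoder.module", "flava.image_model"),
]


def remap_state_dict(state_dict):
    upgraded = OrderedDict()
    for key, value in state_dict.items():
        for old, new in RENAMES:
            key = key.replace(old, new)
        upgraded[key] = value.float()  # conversion scripts often upcast to fp32
    return upgraded


sd = {"text_encoder.module.weight": torch.ones(2, 2, dtype=torch.float16)}
print(list(remap_state_dict(sd)))  # ['flava.text_model.weight']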
| 220
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_UpperCamelCase : Tuple = int(input('Enter number: ').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
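# The O(n) divisor scan above can be cut to O(sqrt(n)) by pairing each
# divisor d <= sqrt(n) with its cofactor n // d; a sketch of that variant:
import math


def perfect_fast(number: int) -> bool:
    """
    >>> perfect_fast(6)
    True
    >>> perfect_fast(28)
    True
    >>> perfect_fast(27)
    False
    """
    if number < 2:
        return False
    total = 1  # 1 divides everything; the number itself is excluded
    for d in range(2, math.isqrt(number) + 1):
        if number % d == 0:
            total += d
            other = number // d
            if other != d:
                total += other
    return total == number


if __name__ == "__main__":
    import doctest

    doctest.testmod()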
| 220
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCamelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase : Optional[int] = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
A__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/'''))
A__ = self.diffusers_dir
shutil.copy(
os.path.join(UpperCAmelCase__ , '''src/diffusers/schedulers/scheduling_ddpm.py''') , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''') , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
A__ = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]=None) ->int:
'''simple docstring'''
A__ = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
A__ = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
A__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
A__ = black.format_str(UpperCAmelCase__ , mode=UpperCAmelCase__)
A__ = os.path.join(self.diffusers_dir , '''new_code.py''')
with open(UpperCAmelCase__ , '''w''' , newline='''\n''') as f:
f.write(UpperCAmelCase__)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase__)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase__)
with open(UpperCAmelCase__ , '''r''') as f:
self.assertTrue(f.read() , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
'''simple docstring'''
A__ = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''')
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , UpperCAmelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , UpperCAmelCase__) , )
# Copy consistency with a really long name
A__ = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , UpperCAmelCase__ , UpperCAmelCase__) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , UpperCAmelCase__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , UpperCAmelCase__) , )
| 351
|
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0) ->None:
'''simple docstring'''
A__ , A__ = row, column
A__ = [[default_value for c in range(UpperCAmelCase__)] for r in range(UpperCAmelCase__)]
def __str__( self : List[str]) ->str:
'''simple docstring'''
A__ = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
A__ = 0
for row_vector in self.array:
for obj in row_vector:
A__ = max(UpperCAmelCase__ , len(str(UpperCAmelCase__)))
A__ = f"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCAmelCase__ : list[float]) -> str:
nonlocal string_format_identifier
A__ = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__) for row_vector in self.array)
return s
def __repr__( self : Tuple) ->str:
'''simple docstring'''
return str(self)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : tuple[int, int]) ->bool:
'''simple docstring'''
if not (isinstance(UpperCAmelCase__ , (list, tuple)) and len(UpperCAmelCase__) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : List[Any] , UpperCAmelCase__ : tuple[int, int]) ->Any:
'''simple docstring'''
assert self.validate_indicies(UpperCAmelCase__)
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[Any] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float) ->None:
'''simple docstring'''
assert self.validate_indicies(UpperCAmelCase__)
A__ = value
def __add__( self : Optional[int] , UpperCAmelCase__ : Matrix) ->Matrix:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__)
assert self.row == another.row and self.column == another.column
# Add
A__ = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
A__ = self[r, c] + another[r, c]
return result
def __neg__( self : str) ->Matrix:
'''simple docstring'''
A__ = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
A__ = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix) ->Matrix:
'''simple docstring'''
return self + (-another)
def __mul__( self : Union[str, Any] , UpperCAmelCase__ : int | float | Matrix) ->Matrix:
'''simple docstring'''
if isinstance(UpperCAmelCase__ , (int, float)): # Scalar multiplication
A__ = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
A__ = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # Matrix multiplication
assert self.column == another.row
A__ = Matrix(self.row , another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
A__ = f"""Unsupported type given for another ({type(UpperCAmelCase__)})"""
raise TypeError(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Matrix:
'''simple docstring'''
A__ = Matrix(self.column , self.row)
for r in range(self.row):
for c in range(self.column):
A__ = self[r, c]
return result
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix) ->Any:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__)
assert self.row == self.column == u.row == v.row # u, v should have the same number of rows as this square matrix
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
A__ = v.transpose()
A__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
A__ = Matrix(3 , 3 , 0 )
for i in range(3 ):
A__ = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
A__ = Matrix(3 , 1 , 0 )
A__ , A__ , A__ = 1, 2, -3
A__ = Matrix(3 , 1 , 0 )
A__ , A__ , A__ = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase_ , lowercase_ )}""" )
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
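# For reference, `sherman_morrison` above implements the standard identity
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u);
# a quick NumPy check with the same u, v as the test above (only numpy assumed):
import numpy as np

a = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

ainv = np.linalg.inv(a)
denom = 1.0 + (v.T @ ainv @ u).item()  # must be nonzero for the update to exist
updated = ainv - (ainv @ u @ v.T @ ainv) / denom

assert np.allclose(updated, np.linalg.inv(a + u @ v.T))
print("Sherman-Morrison identity verified")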
| 231
| 0
|
"""simple docstring"""
from __future__ import annotations
class snake_case :
'''simple docstring'''
def __init__( self : int, _lowerCamelCase : List[Any]=None ):
'''simple docstring'''
__A = data
__A = None
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
__A = []
__A = self
while temp:
string_rep.append(f'{temp.data}' )
__A = temp.next
return "->".join(_lowerCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if not elements_list:
raise Exception('''The Elements List is empty''' )
__A = __A = Node(elements_list[0] )
for i in range(1 , len(__UpperCamelCase ) ):
__A = Node(elements_list[i] )
__A = current.next
return head
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ):
print_reverse(head_node.next )
print(head_node.data )
def lowerCAmelCase ( ):
"""simple docstring"""
from doctest import testmod
testmod()
__A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print('''Linked List:''' )
print(__UpperCamelCase )
print('''Elements in Reverse:''' )
print_reverse(__UpperCamelCase )
if __name__ == "__main__":
main()
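# An iterative alternative to the recursive `print_reverse` above; it uses
# the same .data / .next node shape and avoids Python's recursion limit on
# long lists (a minimal Node is included so the sketch runs standalone):
class _Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node


def print_reverse_iterative(head_node) -> None:
    values = []
    node = head_node
    while node is not None:
        values.append(node.data)
        node = node.next
    for value in reversed(values):
        print(value)


head = _Node(14, _Node(52, _Node(14, _Node(12, _Node(43)))))
print_reverse_iterative(head)  # prints 43, 12, 14, 52, 14 on separate lines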
| 266
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class snake_case ( ctypes.Structure ):
'''simple docstring'''
A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def lowerCAmelCase ( ):
"""simple docstring"""
if os.name == "nt":
__A = CursorInfo()
__A = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
__A = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def lowerCAmelCase ( ):
"""simple docstring"""
if os.name == "nt":
__A = CursorInfo()
__A = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
__A = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def lowerCAmelCase ( ):
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
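# A self-contained POSIX-only sketch of the hide/show-cursor pattern above
# (the Windows ctypes branch is omitted); the try/finally guarantees the
# cursor is restored even if the body raises:
import sys
import time
from contextlib import contextmanager


@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")  # ANSI escape: hide cursor
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # ANSI escape: show cursor
        sys.stdout.flush()


if __name__ == "__main__":
    with hidden_cursor():
        for i in range(3):
            print(f"working... {i}")
            time.sleep(0.1)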
| 266
| 1
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : str
snake_case__ : List[str]
snake_case__ : Optional[List[str]]
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : List[int]
snake_case__ : List[int]
snake_case__ : Optional[List[int]] = None
snake_case__ : Optional[List[int]] = None
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
snake_case__ : str = "train"
snake_case__ : Optional[int] = "dev"
snake_case__ : Union[str, Any] = "test"
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
raise NotImplementedError
@staticmethod
def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : List[str]=-1_0_0 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , ) -> List[InputFeatures]:
a_ : Dict = {label: i for i, label in enumerate(a__ )}
a_ : str = []
for ex_index, example in enumerate(a__ ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d of %d' , a__ , len(a__ ) )
a_ : Tuple = []
a_ : Optional[int] = []
for word, label in zip(example.words , example.labels ):
a_ : Optional[int] = tokenizer.tokenize(a__ )
# bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize on just a space.
if len(a__ ) > 0:
tokens.extend(a__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(a__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
a_ : Union[str, Any] = tokenizer.num_special_tokens_to_add()
if len(a__ ) > max_seq_length - special_tokens_count:
a_ : List[Any] = tokens[: (max_seq_length - special_tokens_count)]
a_ : Optional[int] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
a_ : List[Any] = [sequence_a_segment_id] * len(a__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
a_ : Union[str, Any] = [cls_token] + tokens
a_ : Tuple = [pad_token_label_id] + label_ids
a_ : Tuple = [cls_token_segment_id] + segment_ids
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(a__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
a_ : int = [1 if mask_padding_with_zero else 0] * len(a__ )
# Zero-pad up to the sequence length.
a_ : Optional[Any] = max_seq_length - len(a__ )
if pad_on_left:
a_ : int = ([pad_token] * padding_length) + input_ids
a_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
a_ : int = ([pad_token_segment_id] * padding_length) + segment_ids
a_ : List[Any] = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(a__ ) == max_seq_length
assert len(a__ ) == max_seq_length
assert len(a__ ) == max_seq_length
assert len(a__ ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(a__ ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(a__ ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(a__ ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(a__ ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(a__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
a_ : List[Any] = None
features.append(
InputFeatures(
input_ids=a__ , attention_mask=a__ , token_type_ids=a__ , label_ids=a__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
snake_case__ : List[InputFeatures]
snake_case__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : List[str] = Split.train , ) -> str:
a_ : int = os.path.join(
a__ , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(a__ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a_ : Tuple = cached_features_file + '.lock'
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
a_ : Optional[int] = torch.load(a__ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
a_ : Union[str, Any] = token_classification_task.read_examples_from_file(a__ , a__ )
# TODO clean up all this to leverage built-in features of tokenizers
a_ : Tuple = token_classification_task.convert_examples_to_features(
a__ , a__ , a__ , a__ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=a__ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , a__ )
def __len__( self : int ) -> List[Any]:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
snake_case__ : List[InputFeatures]
snake_case__ : int = -100
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any] = Split.train , ) -> Optional[int]:
a_ : Optional[Any] = token_classification_task.read_examples_from_file(a__ , a__ )
# TODO clean up all this to leverage built-in features of tokenizers
a_ : Dict = token_classification_task.convert_examples_to_features(
a__ , a__ , a__ , a__ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=a__ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
a_ : List[str] = tf.data.Dataset.from_generator(
a__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
a_ : Optional[Any] = tf.data.Dataset.from_generator(
a__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
a_ : Dict = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> List[Any]:
return len(self.features )
def __getitem__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
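# The core trick in `convert_examples_to_features` above is the
# word-to-subword label alignment: only the first subtoken of a word keeps
# the real label, the rest get -100 so CrossEntropyLoss ignores them.
# A minimal sketch with a toy tokenizer (the splitting rule is made up):
PAD_TOKEN_LABEL_ID = -100


def toy_tokenize(word):
    # stand-in for tokenizer.tokenize(): split long words into two pieces
    return [word] if len(word) <= 4 else [word[:4], "##" + word[4:]]


def align_labels(words, labels):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        subtokens = toy_tokenize(word)
        if subtokens:  # some tokenizers return [] for bare whitespace
            tokens.extend(subtokens)
            label_ids.extend([label] + [PAD_TOKEN_LABEL_ID] * (len(subtokens) - 1))
    return tokens, label_ids


print(align_labels(["Transformers", "rock"], [1, 0]))
# (['Tran', '##sformers', 'rock'], [1, -100, 0])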
| 354
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Union[str, Any] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
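# The `_LazyModule` assignment above defers all heavy imports until first
# attribute access; a sketch of the same idea using PEP 562's module-level
# __getattr__ instead of the Transformers helper (save as a package
# __init__.py; the two entries below mirror a slice of `_import_structure`):
import importlib

_IMPORT_STRUCTURE = {
    "configuration_whisper": ["WhisperConfig"],
    "processing_whisper": ["WhisperProcessor"],
}
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _IMPORT_STRUCTURE.items() for attr in attrs}


def __getattr__(name):
    module_name = _ATTR_TO_MODULE.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{module_name}", __name__)
    return getattr(submodule, name)  # the submodule is imported only on first access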
| 120
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__UpperCamelCase : Optional[int] = ['gpt2']
__UpperCamelCase : str = 'gpt2'
if is_tf_available():
class __magic_name__ ( tf.Module):
def __init__( self : str , lowerCamelCase__ : str ) -> str:
'''simple docstring'''
super().__init__()
UpperCamelCase__ : Any = tokenizer
UpperCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = TFGPTaLMHeadModel.from_config(lowerCamelCase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = self.tokenizer(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = tokenized['''input_ids'''].to_tensor()
UpperCamelCase__ : Dict = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCamelCase__ : Optional[int] = self.model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().setUp()
UpperCamelCase__ : Optional[Any] = [GPTaTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCamelCase__ : Any = [TFGPTaTokenizer.from_pretrained(lowerCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ : List[Any] = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
UpperCamelCase__ : Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCamelCase__ : Any = tokenizer([test_inputs] , return_tensors='''tf''' )
UpperCamelCase__ : List[Any] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCamelCase__ : Union[str, Any] = python_outputs[key].numpy()
UpperCamelCase__ : int = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowerCamelCase__ , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : str = tf.function(lowerCamelCase__ )
for test_inputs in self.test_sentences:
UpperCamelCase__ : Optional[int] = tf.constant(lowerCamelCase__ )
UpperCamelCase__ : Tuple = compiled_tokenizer(lowerCamelCase__ )
UpperCamelCase__ : Tuple = tf_tokenizer(lowerCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : List[str] = ModelToSave(tokenizer=lowerCamelCase__ )
UpperCamelCase__ : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCamelCase__ : str = model.serving(lowerCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ : Optional[Any] = Path(lowerCamelCase__ ) / '''saved.model'''
tf.saved_model.save(lowerCamelCase__ , lowerCamelCase__ , signatures={'''serving_default''': model.serving} )
UpperCamelCase__ : Optional[Any] = tf.saved_model.load(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = loaded_model.signatures['''serving_default'''](lowerCamelCase__ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCamelCase__ : Optional[Any] = tf_tokenizer(lowerCamelCase__ ) # Build model with some sample inputs
UpperCamelCase__ : Dict = tf_tokenizer.get_config()
UpperCamelCase__ : Any = TFGPTaTokenizer.from_config(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = model_from_config(lowerCamelCase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCamelCase__ : List[Any] = 123123
for max_length in [3, 5, 1024]:
UpperCamelCase__ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCamelCase__ : Optional[Any] = tf_tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ )
UpperCamelCase__ : List[Any] = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 146
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Dict, lowerCamelCase : pyspark.sql.DataFrame, lowerCamelCase : Optional[NamedSplit] = None, lowerCamelCase : Optional[Features] = None, lowerCamelCase : bool = True, lowerCamelCase : str = None, lowerCamelCase : bool = False, lowerCamelCase : str = None, lowerCamelCase : bool = True, lowerCamelCase : str = "arrow", **lowerCamelCase : str, ):
'''simple docstring'''
super().__init__(
split=lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase, streaming=lowerCamelCase, **lowerCamelCase, )
lowercase__ = load_from_cache_file
lowercase__ = file_format
lowercase__ = Spark(
df=lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase, working_dir=lowerCamelCase, **lowerCamelCase, )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCamelCase, file_format=self._file_format, )
return self.builder.as_dataset(split=self.split )
| 207
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :int = logging.get_logger(__name__)
__a :int = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 'time_series_transformer'
_lowerCamelCase : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "student_t" , UpperCAmelCase : str = "nll" , UpperCAmelCase : int = 1 , UpperCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase : Optional[Union[str, bool]] = "mean" , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : bool = True , UpperCAmelCase : str = "gelu" , UpperCAmelCase : int = 64 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : int = 100 , UpperCAmelCase : float = 0.02 , UpperCAmelCase : Union[str, Any]=True , **UpperCAmelCase : List[str] , ):
# time series specific configuration
A_ = prediction_length
A_ = context_length or prediction_length
A_ = distribution_output
A_ = loss
A_ = input_size
A_ = num_time_features
A_ = lags_sequence
A_ = scaling
A_ = num_dynamic_real_features
A_ = num_static_real_features
A_ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
A_ = cardinality
else:
A_ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
A_ = embedding_dimension
else:
A_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ = num_parallel_samples
# Transformer architecture configuration
A_ = input_size * len(UpperCAmelCase ) + self._number_of_features
A_ = d_model
A_ = encoder_attention_heads
A_ = decoder_attention_heads
A_ = encoder_ffn_dim
A_ = decoder_ffn_dim
A_ = encoder_layers
A_ = decoder_layers
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = activation_function
A_ = init_std
A_ = use_cache
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
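# Quick check of the embedding-dimension default above: each categorical
# feature of cardinality `cat` gets min(50, (cat + 1) // 2) dimensions.
cardinality = [3, 10, 1000]
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]
print(embedding_dimension)  # [2, 5, 50]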
| 329
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329
| 1
|
import math
import flax.linen as nn
import jax.numpy as jnp
def a__ ( A_, A_, A_ = 1, A_ = 1, A_ = 1.0e4, A_ = False, A_ = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
__magic_name__ = float(embedding_dim // 2 )
__magic_name__ = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
__magic_name__ = min_timescale * jnp.exp(jnp.arange(A_, dtype=jnp.floataa ) * -log_timescale_increment )
__magic_name__ = jnp.expand_dims(A_, 1 ) * jnp.expand_dims(A_, 0 )
# scale embeddings
__magic_name__ = scale * emb
if flip_sin_to_cos:
__magic_name__ = jnp.concatenate([jnp.cos(A_ ), jnp.sin(A_ )], axis=1 )
else:
__magic_name__ = jnp.concatenate([jnp.sin(A_ ), jnp.cos(A_ )], axis=1 )
__magic_name__ = jnp.reshape(A_, [jnp.shape(A_ )[0], embedding_dim] )
return signal
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
a__ = 32
a__ = jnp.floataa
@nn.compact
def __call__( self : int , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""" )(UpperCamelCase__ )
__magic_name__ = nn.silu(UpperCamelCase__ )
__magic_name__ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""" )(UpperCamelCase__ )
return temb
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
a__ = 32
a__ = False
a__ = 1
@nn.compact
def __call__( self : Any , UpperCamelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return get_sinusoidal_embeddings(
UpperCamelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
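# A NumPy sketch of the sinusoidal timestep embedding computed above
# (simplified: freq_shift and the scale/flip options are dropped); half the
# dimensions are sines, half cosines, over geometrically spaced frequencies:
import numpy as np


def sinusoidal_embedding(timesteps, embedding_dim, max_timescale=1.0e4, min_timescale=1.0):
    assert embedding_dim % 2 == 0, "embedding dimension should be even"
    num_timescales = embedding_dim // 2
    log_increment = np.log(max_timescale / min_timescale) / num_timescales
    inv_freq = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
    args = timesteps[:, None] * inv_freq[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)


emb = sinusoidal_embedding(np.arange(4, dtype=np.float64), 32)
print(emb.shape)  # (4, 32)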
| 88
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :int = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = """perceiver"""
    def __init__(
        self,
        num_latents=2_5_6,
        d_latents=1_2_8_0,
        d_model=7_6_8,
        num_blocks=1,
        num_self_attends_per_block=2_6,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=2_6_2,
        max_position_embeddings=2_0_4_8,
        image_size=5_6,
        train_size=[3_6_8, 4_9_6],
        num_frames=1_6,
        audio_samples_per_frame=1_9_2_0,
        samples_per_patch=1_6,
        output_shape=[1, 1_6, 2_2_4, 2_2_4],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 4_0, image_height: int = 4_0, ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.")
| 22
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(F"""Could not make batched video from {videos}""")
class _SCREAMING_SNAKE_CASE(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        '''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
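# Illustrative usage (added; not part of the original file). A minimal sketch assuming
# numpy is available; the frame shape and count are arbitrary example values.
if __name__ == "__main__":
    processor = _SCREAMING_SNAKE_CASE()
    video = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor.preprocess(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224) after resize + center crop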
| 360
|
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ["torch", "torchsde"]
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["torch", "torchsde"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["torch", "torchsde"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["torch", "torchsde"])
| 273
| 0
|
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs) -> int:
    """simple docstring"""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items())
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            ) )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards, max_num_jobs) -> List[range]:
    """simple docstring"""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs, max_num_jobs) -> List[dict]:
    """simple docstring"""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list) -> dict:
    """simple docstring"""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng, gen_kwargs) -> dict:
    """simple docstring"""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
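# Illustrative usage (added; not part of the original module). A small sketch of how
# shards get distributed across jobs; the numbers are arbitrary examples.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # [range(0, 4), range(4, 7), range(7, 10)]: earlier groups absorb the remainder
    print(_split_gen_kwargs({"files": ["a", "b", "c", "d"]}, max_num_jobs=2))
    # [{'files': ['a', 'b']}, {'files': ['c', 'd']}]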
| 28
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """simple docstring"""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """simple docstring"""
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (1_7, self.config.projection_dim))
        self.special_care_embeds = self.param(
            'special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (1_7,))
        self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.0_1
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """simple docstring"""
    config_class = CLIPConfig
    main_input_name = """clip_input"""
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 2_2_4, 2_2_4, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """simple docstring"""
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        random_params = self.module.init(rngs, clip_input)['params']
        return random_params
    def __call__(self, clip_input, params: dict = None, ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {'params': params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={}, )
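# Illustrative usage (added; not in the original file). A minimal sketch of the cosine
# helper above; the arrays are random examples and the shapes are arbitrary.
if __name__ == "__main__":
    key = jax.random.PRNGKey(0)
    a = jax.random.normal(key, (2, 8))
    b = jax.random.normal(jax.random.split(key)[0], (3, 8))
    sim = jax_cosine_distance(a, b)  # (2, 3) matrix of cosine similarities
    print(sim.shape)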
| 28
| 1
|
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    '''simple docstring'''
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
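    # Additional illustrative checks (added; the example numbers are arbitrary).
    print(indian_phone_validator('9876543210'))  # True: ten digits with a valid leading digit
    print(indian_phone_validator('12345'))  # False: too short and bad leading digit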
| 364
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        train_parser = parser.add_parser(
            '''convert''', help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''', )
        train_parser.add_argument('''--model_type''', type=str, required=True, help='''Model\'s type.''')
        train_parser.add_argument(
            '''--tf_checkpoint''', type=str, required=True, help='''TensorFlow checkpoint path or folder.''')
        train_parser.add_argument(
            '''--pytorch_dump_output''', type=str, required=True, help='''Path to the PyTorch saved model output.''')
        train_parser.add_argument('''--config''', type=str, default='''''', help='''Configuration file path or folder.''')
        train_parser.add_argument(
            '''--finetuning_task_name''', type=str, default=None, help='''Optional fine-tuning task name if the TF model was a finetuned model.''', )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args, ):
        """simple docstring"""
        self._logger = logging.get_logger('''transformers-cli/converting''')
        self._logger.info(F'Loading model {model_type}')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        """simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
_a = self._tf_checkpoint
_a = ''''''
else:
_a = self._tf_checkpoint
_a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 179
| 0
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """segformer"""
    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.0_2, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=2_5_6, semantic_loss_ignore_index=2_5_5, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.', FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1E-4
    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 1_2
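# Illustrative usage (added; not part of the original file): a quick sketch with the
# default configuration values above, for demonstration only.
if __name__ == "__main__":
    config = SegformerConfig()
    print(config.hidden_sizes)  # [32, 64, 160, 256]
    print(SegformerOnnxConfig(config, task="default").inputs)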
| 28
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed)
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            '''height''': None,
            '''width''': None,
            '''num_inference_steps''': 1,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        '''simple docstring'''
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = '''french fries'''
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''')
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed)
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        '''simple docstring'''
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        expected_slice = np.array(
            [
                0.3696_8392,
                0.2702_5372,
                0.3244_6766,
                0.2837_9387,
                0.3636_3274,
                0.3073_3347,
                0.2710_0027,
                0.2705_4125,
                0.2553_6096,
            ])
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        '''simple docstring'''
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-base''', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        '''simple docstring'''
        number_of_steps = 0
        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.1868_1869,
                        0.3390_7816,
                        0.536_1276,
                        0.1443_2865,
                        -0.0285_6611,
                        -0.7394_1123,
                        0.2339_7987,
                        0.4732_2682,
                        -0.3782_3164,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.1853_9645,
                        0.3398_7248,
                        0.537_8559,
                        0.1443_7142,
                        -0.0245_5261,
                        -0.733_8317,
                        0.2399_0755,
                        0.4735_6272,
                        -0.378_6505,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
        callback_fn.has_been_called = False
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 194
| 0
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """simple docstring"""
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        """simple docstring"""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        """simple docstring"""
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        """simple docstring"""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        """simple docstring"""
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    '''simple docstring'''
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
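# Illustrative usage (added; not in the original file). A tiny sketch of the box helpers;
# the coordinates are arbitrary example values.
if __name__ == "__main__":
    boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]])  # x1, y1, x2, y2
    _clip_box(boxes, (80, 100))  # clamp in place to an 80x100 (h, w) image
    print(boxes)  # tensor([[ 0., 10., 100., 80.]])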
| 356
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''')
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
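# Illustrative usage (added; not in the original file). A sketch only: it assumes a
# CLIPSeg checkpoint such as "CIDAS/clipseg-rd64-refined" is available for download.
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=[pil_image], return_tensors="pt")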
| 91
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=F'Parameter {name} of model {model_class} seems not properly initialized', )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=F'Parameter {name} of model {model_class} seems not properly initialized', )
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4_180, -1.5_051, -3.4_836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 3
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase : Tuple = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
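# A minimal sketch of the lazy-import pattern used above (hypothetical
# `_LazySketch`, not the real `transformers.utils._LazyModule`): attribute
# access triggers the real import, so heavy backends load only on first use.
import importlib
import types
class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._targets = {attr: "." + mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        # imported lazily, on first attribute access
        submodule = importlib.import_module(self._targets[attr], self.__name__)
        return getattr(submodule, attr)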
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : Optional[Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 207
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Union[str, Any]=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Dict, lowerCAmelCase_ : int=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ''
else:
__lowerCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
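# A shape check for the fused-qkv split performed above (a sketch with toy
# sizes, independent of the conversion script): timm stores q, k and v as a
# single (3 * hidden, hidden) matrix, sliced into three equal blocks.
import torch
_hidden = 4
_fused = torch.randn(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden], _fused[_hidden : 2 * _hidden], _fused[-_hidden:]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)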
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = ViTConfig()
__lowerCAmelCase = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__lowerCAmelCase = True
__lowerCAmelCase = int(vit_name[-12:-10] )
__lowerCAmelCase = int(vit_name[-9:-6] )
else:
__lowerCAmelCase = 1000
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = int(vit_name[-6:-4] )
__lowerCAmelCase = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__lowerCAmelCase = 192
__lowerCAmelCase = 768
__lowerCAmelCase = 12
__lowerCAmelCase = 3
elif vit_name[9:].startswith('small' ):
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__lowerCAmelCase = 768
__lowerCAmelCase = 2304
__lowerCAmelCase = 8
__lowerCAmelCase = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif vit_name[4:].startswith('huge' ):
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
# load original model from timm
__lowerCAmelCase = timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_, lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__lowerCAmelCase = ViTModel(lowerCAmelCase_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__lowerCAmelCase = DeiTImageProcessor(size=config.image_size )
else:
__lowerCAmelCase = ViTImageProcessor(size=config.image_size )
__lowerCAmelCase = image_processor(images=prepare_img(), return_tensors='pt' )
__lowerCAmelCase = encoding['pixel_values']
__lowerCAmelCase = model(lowerCAmelCase_ )
if base_model:
__lowerCAmelCase = timm_model.forward_features(lowerCAmelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase_, outputs.pooler_output, atol=1E-3 )
else:
__lowerCAmelCase = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_, outputs.logits, atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 207
| 1
|
import sys
_A = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( n : str = _A ):
largest_product = -sys.maxsize - 1
for i in range(len(n ) - 12 ):
product = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
largest_product = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
__UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s
__UpperCAmelCase = 3e8 # unit of c : m * s^-1
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]:
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_4_0 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowerCAmelCase_ :Any = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
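# A worked instance of the Casimir formula implemented above (self-contained
# sketch): F = (hbar * c * pi^2 * A) / (240 * d^4) for plates of area 4 cm^2
# held one micrometre apart comes out to roughly half a micronewton.
from math import pi as _pi
_hbar, _c = 1.054571817e-34, 3e8
_force = (_hbar * _c * _pi**2 * 4e-4) / (2_4_0 * (1e-6) ** 4)
assert 5e-7 < _force < 6e-7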
| 84
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt"}
__magic_name__ = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
__magic_name__ = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
__magic_name__ = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Tuple = ConvBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , normalizer_state.pop("""type"""))
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = strip_accents
__SCREAMING_SNAKE_CASE = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE = normalizer_class(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = do_lower_case
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None):
__SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
__SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
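# Segment-id layout produced by the token-type method above (illustrative
# token ids): a single sequence [CLS] A [SEP] maps to all zeros, while a pair
# [CLS] A [SEP] B [SEP] marks the second segment, including its trailing
# [SEP], with ones.
_cls, _sep = [101], [102]
_a, _b = [7, 8], [9]
assert len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]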
| 255
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__magic_name__ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if got_ver is None or want_ver is None:
raise ValueError(
f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
f" reinstalling {pkg}." )
if not ops[op](version.parse(UpperCamelCase_ ) , version.parse(UpperCamelCase_ ) ):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = None ):
__SCREAMING_SNAKE_CASE = f"\n{hint}" if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""" , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = requirement, None, None
else:
__SCREAMING_SNAKE_CASE = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , UpperCamelCase_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f" got {requirement}" )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = match[0]
__SCREAMING_SNAKE_CASE = want_full.split(""",""" ) # there could be multiple requirements
__SCREAMING_SNAKE_CASE = {}
for w in want_range:
__SCREAMING_SNAKE_CASE = re.findall(r"""^([\s!=<>]{1,2})(.+)""" , UpperCamelCase_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f" but got {requirement}" )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = match[0]
__SCREAMING_SNAKE_CASE = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE = """.""".join([str(UpperCamelCase_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE = importlib.metadata.version(UpperCamelCase_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(UpperCamelCase_ , UpperCamelCase_ )
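# Behaviour sketch for the requirement parser above: a specifier such as
# "numpy>=1.20,<2.0" is split on "," into ">=1.20" and "<2.0", each parsed
# into an (op, version) pair, and every pair must hold for the installed
# version. A bare name like "numpy" only checks that the package exists.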
| 255
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__magic_name__ = key.replace("""backbone.0.body""", """backbone.conv_encoder.model""" )
__magic_name__ = value
else:
__magic_name__ = value
return new_state_dict
def a__ ( A_, A_=False ):
'''simple docstring'''
__magic_name__ = """"""
if is_panoptic:
__magic_name__ = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__magic_name__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__magic_name__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ = in_proj_weight[:256, :]
__magic_name__ = in_proj_bias[:256]
__magic_name__ = in_proj_weight[256:512, :]
__magic_name__ = in_proj_bias[256:512]
__magic_name__ = in_proj_weight[-256:, :]
__magic_name__ = in_proj_bias[-256:]
def a__ ( ):
'''simple docstring'''
__magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw )
return im
@torch.no_grad()
def a__ ( A_, A_ ):
'''simple docstring'''
__magic_name__ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__magic_name__ = """resnet101"""
if "dc5" in model_name:
__magic_name__ = True
__magic_name__ = """panoptic""" in model_name
if is_panoptic:
__magic_name__ = 250
else:
__magic_name__ = 91
__magic_name__ = """huggingface/label-files"""
__magic_name__ = """coco-detection-id2label.json"""
__magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) )
__magic_name__ = {int(k ): v for k, v in idalabel.items()}
__magic_name__ = idalabel
__magic_name__ = {v: k for k, v in idalabel.items()}
# load image processor
__magic_name__ = """coco_panoptic""" if is_panoptic else """coco_detection"""
__magic_name__ = ConditionalDetrImageProcessor(format=A_ )
# prepare image
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A_, return_tensors="""pt""" )
__magic_name__ = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
__magic_name__ = torch.hub.load("""DeppMeng/ConditionalDETR""", A_, pretrained=A_ ).eval()
__magic_name__ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__magic_name__ = """conditional_detr.""" + src
rename_key(A_, A_, A_ )
__magic_name__ = rename_backbone_keys(A_ )
# query, key and value matrices need special treatment
read_in_q_k_v(A_, is_panoptic=A_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__magic_name__ = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__magic_name__ = state_dict.pop(A_ )
__magic_name__ = val
# finally, create HuggingFace model and load state dict
__magic_name__ = ConditionalDetrForSegmentation(A_ ) if is_panoptic else ConditionalDetrForObjectDetection(A_ )
model.load_state_dict(A_ )
model.eval()
model.push_to_hub(repo_id=A_, organization="""DepuMeng""", commit_message="""Add model""" )
# verify our conversion
__magic_name__ = conditional_detr(A_ )
__magic_name__ = model(A_ )
assert torch.allclose(outputs.logits, original_outputs["""pred_logits"""], atol=1e-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1e-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
image_processor.save_pretrained(A_ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__lowerCAmelCase : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 88
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """pegasus"""
a__ = ["""past_key_values"""]
a__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[int]=5_0265 , UpperCamelCase__ : Optional[int]=1024 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Union[str, Any]=4096 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : List[str]=4096 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=0 , UpperCamelCase__ : int=False , UpperCamelCase__ : Any=0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Tuple=1 , **UpperCamelCase__ : Union[str, Any] , ) -> str:
"""simple docstring"""
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = use_cache
__magic_name__ = encoder_layers
__magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
@property
def _lowercase ( self : List[Any] ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
return self.d_model
| 88
| 1
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowercase__ : str = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
lowercase__ : List[Any] = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
lowercase__ : str = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Any=False ):
if rouge_types is None:
lowerCAmelCase_ : Tuple = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
lowerCAmelCase_ : Union[str, Any] = rouge_scorer.RougeScorer(rouge_types=SCREAMING_SNAKE_CASE_ , use_stemmer=SCREAMING_SNAKE_CASE_ )
if use_aggregator:
lowerCAmelCase_ : List[str] = scoring.BootstrapAggregator()
else:
lowerCAmelCase_ : Any = []
for ref, pred in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Optional[int] = scorer.score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if use_aggregator:
aggregator.add_scores(SCREAMING_SNAKE_CASE_ )
else:
scores.append(SCREAMING_SNAKE_CASE_ )
if use_aggregator:
lowerCAmelCase_ : List[Any] = aggregator.aggregate()
else:
lowerCAmelCase_ : Union[str, Any] = {}
for key in scores[0]:
lowerCAmelCase_ : Tuple = [score[key] for score in scores]
return result
| 289
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """open-llama"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=1_0_0_0_0_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : List[Any]=1_1_0_0_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : str="silu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-6 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : List[str] = rms_norm_eps
lowerCAmelCase_ : List[Any] = use_cache
lowerCAmelCase_ : Optional[int] = kwargs.pop(
'use_memorry_efficient_attention' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_dropout_prob
lowerCAmelCase_ : Tuple = use_stable_embedding
lowerCAmelCase_ : Optional[Any] = shared_input_output_embedding
lowerCAmelCase_ : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"got {self.rope_scaling}" )
lowerCAmelCase_ : int = self.rope_scaling.get('type' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self.rope_scaling.get('factor' , SCREAMING_SNAKE_CASE_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 289
| 1
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( a_: int | str ):
_UpperCAmelCase : List[Any] = str(a_ )
return n == n[::-1]
def __UpperCAmelCase ( a_: int = 1_000_000 ):
_UpperCAmelCase : List[Any] = 0
for i in range(1, a_ ):
if is_palindrome(a_ ) and is_palindrome(bin(a_ ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
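# A quick hand check of the double-base test above (self-contained sketch):
# 5 reads the same both ways in decimal ("5") and in binary ("101"), so it
# is counted; 6 ("110") is not.
_n = 5
assert str(_n) == str(_n)[::-1] and bin(_n)[2:] == bin(_n)[2:][::-1]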
| 145
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCAmelCase : str = AutoTokenizer.from_pretrained("google/mt5-small" )
_UpperCAmelCase : str = tokenizer("Hello there" , return_tensors="pt" ).input_ids
_UpperCAmelCase : str = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
_UpperCAmelCase : Any = model(input_ids.to(lowerCAmelCase__ ) , labels=labels.to(lowerCAmelCase__ ) ).loss
_UpperCAmelCase : Dict = -(labels.shape[-1] * loss.item())
_UpperCAmelCase : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
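# Context for the expected score above (a sketch): the model returns the
# mean cross-entropy per label token, so multiplying the loss by
# labels.shape[-1] and negating recovers the sequence log-likelihood that
# the reference value -84.9127 was computed against.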
| 145
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : int = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "bridgetower_vision_model"
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=7_6_8 , UpperCAmelCase_ : Union[str, Any]=1_2 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : List[str]=1_6 , UpperCAmelCase_ : int=2_8_8 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Any=1e-05 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=False , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : Optional[int] = hidden_size
a : Tuple = num_hidden_layers
a : Any = num_channels
a : Tuple = patch_size
a : Optional[int] = image_size
a : Union[str, Any] = initializer_factor
a : Optional[Any] = layer_norm_eps
a : Dict = stop_gradient
a : Dict = share_layernorm
a : Any = remove_last_layer
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : List[Any] = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
if config_dict.get('model_type') == "bridgetower":
a : Tuple = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Union[str, Any] = "bridgetower_text_model"
def __init__( self : Any , UpperCAmelCase_ : int=5_0_2_6_5 , UpperCAmelCase_ : List[str]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : Union[str, Any]=1_2 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Dict=3_0_7_2 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=5_1_4 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : List[Any]=1e-05 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]="absolute" , UpperCAmelCase_ : List[Any]=True , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[Any] = vocab_size
a : Dict = hidden_size
a : Optional[Any] = num_hidden_layers
a : str = num_attention_heads
a : Any = hidden_act
a : Optional[int] = initializer_factor
a : Any = intermediate_size
a : int = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : List[Any] = max_position_embeddings
a : Dict = type_vocab_size
a : str = layer_norm_eps
a : List[str] = position_embedding_type
a : Any = use_cache
a : List[Any] = pad_token_id
a : Optional[int] = bos_token_id
a : List[Any] = eos_token_id
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : Union[str, Any] = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
if config_dict.get('model_type') == "bridgetower":
a : int = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "bridgetower"
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=7_6_8 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : List[str]=1e-05 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Any="add" , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : Optional[int]=6 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : Optional[Any] = kwargs.pop('text_config_dict' , UpperCAmelCase_)
a : Optional[int] = kwargs.pop('vision_config_dict' , UpperCAmelCase_)
super().__init__(**UpperCAmelCase_)
a : str = share_cross_modal_transformer_layers
a : List[str] = hidden_act
a : Union[str, Any] = hidden_size
a : Optional[Any] = initializer_factor
a : List[str] = layer_norm_eps
a : str = share_link_tower_layers
a : List[str] = link_tower_type
a : Union[str, Any] = num_attention_heads
a : Any = num_hidden_layers
a : Union[str, Any] = tie_word_embeddings
a : str = init_layernorm_from_vision_encoder
if text_config is None:
a : Any = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')
if vision_config is None:
a : List[str] = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')
a : Optional[Any] = BridgeTowerTextConfig(**UpperCAmelCase_)
a : int = BridgeTowerVisionConfig(**UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , UpperCAmelCase_ : BridgeTowerTextConfig , UpperCAmelCase_ : BridgeTowerVisionConfig , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = copy.deepcopy(self.__dict__)
a : Tuple = self.text_config.to_dict()
a : Any = self.vision_config.to_dict()
a : Optional[Any] = self.__class__.model_type
return output
| 357
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( num1 : int , num2 : int ) -> bool:
"""Return True when the two integers have opposite signs."""
return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
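# Why the XOR trick above works (self-contained sketch): in two's
# complement the sign lives in the top bit, so num1 ^ num2 is negative
# exactly when the signs differ; Python's arbitrary-precision ints follow
# the same rule for ^.
assert (3 ^ -5) < 0 and not ((3 ^ 5) < 0) and not ((-3 ^ -5) < 0)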
| 345
| 0
|
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
_lowerCamelCase : Tuple = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_lowerCamelCase : Optional[Any] = requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
_lowerCamelCase : Union[str, Any] = BeautifulSoup(res.text, '''html.parser''')
_lowerCamelCase : Tuple = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
| 282
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__snake_case : Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : Dict = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""")
parser.add_argument("""--file_path""" , type=a__ , default="""data/dump.txt""" , help="""The path to the data.""")
parser.add_argument("""--tokenizer_type""" , type=a__ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""])
parser.add_argument("""--tokenizer_name""" , type=a__ , default="""bert-base-uncased""" , help="""The tokenizer to use.""")
parser.add_argument("""--dump_file""" , type=a__ , default="""data/dump""" , help="""The dump file prefix.""")
a_ : int = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''')
if args.tokenizer_type == "bert":
a_ : Any = BertTokenizer.from_pretrained(args.tokenizer_name)
a_ : Tuple = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
a_ : Optional[Any] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
a_ : Optional[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name)
a_ : str = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
a_ : Any = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
a_ : Union[str, Any] = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
a_ : Optional[int] = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
a_ : List[Any] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''')
with open(args.file_path , """r""" , encoding="""utf8""") as fp:
a_ : Optional[Any] = fp.readlines()
logger.info("""Start encoding""")
logger.info(f'''{len(a__)} examples to process.''')
a_ : str = []
a_ : Optional[Any] = 0
a_ : str = 1_0_0_0_0
a_ : List[str] = time.time()
for text in data:
a_ : int = f'''{bos} {text.strip()} {sep}'''
a_ : str = tokenizer.encode(a__ , add_special_tokens=a__)
rslt.append(a__)
iter += 1
if iter % interval == 0:
a_ : str = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''')
a_ : int = time.time()
logger.info("""Finished binarization""")
logger.info(f'''{len(a__)} examples processed.''')
a_ : Union[str, Any] = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
a_ : int = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
a_ : List[Any] = [np.uintaa(a__) for d in rslt]
else:
a_ : List[str] = [np.intaa(a__) for d in rslt]
random.shuffle(rslt_)
logger.info(f'''Dump to {dp_file}''')
with open(a__ , """wb""") as handle:
pickle.dump(rslt_ , a__ , protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
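
# Example invocation (hedged: assumes this script is saved as binarized_data.py
# and that data/dump.txt exists; flags come from the argparse setup above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text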
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoint values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters and then
    converts each character into its Unicode codepoint.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. split it into individual characters)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (i.e. a Unicode character) to an id (i.e. its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a codepoint (integer) back to a token, using the human-readable form for special codepoints."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Add the `[CLS] ... [SEP]` (and optional second segment) wrapping around the raw codepoints."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocab file to save: ids are just Unicode codepoints.
        return ()
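
# Hedged usage sketch (illustrative; this module uses relative imports, so use
# it through the transformers package): ids are raw Unicode codepoints plus
# the special private-use symbols defined above.
#
#   tokenizer = CanineTokenizer()
#   tokenizer("hello")["input_ids"]
#   # -> [57344, 104, 101, 108, 108, 111, 57345]   ([CLS] h e l l o [SEP])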
'''simple docstring'''
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with Euclid's algorithm: iterate remainders down
        # to the GCD, then divide both sides by it.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError on purpose
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""Pipeline that runs the same prompt through Stable Diffusion checkpoints v1.1-v1.4 for comparison."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # Setting slice_size to `None` restores full (unsliced) attention.
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def _compare(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
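
# Hedged usage sketch (assumes this file is used as a diffusers community
# pipeline and that all four CompVis checkpoints are downloadable; the
# custom_pipeline name below is an assumption):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe._compare("an astronaut riding a horse")
#   images_v1_1_to_v1_4 = output.images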
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
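
# Example invocation (hedged: the script name and data paths are illustrative):
#   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence --eval_mode e2e --gold_data_mode qa \
#       --evaluation_set test.source --gold_data_path gold.csv \
#       --predictions_path predictions.txt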
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask at 0.5
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
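
# Hedged usage sketch (the checkpoint id and image loading are illustrative,
# not taken from this file):
#
#   from diffusers import UNet2DModel, RePaintScheduler
#   unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
#   scheduler = RePaintScheduler()
#   pipe = RePaintPipeline(unet=unet, scheduler=scheduler)
#   result = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#   inpainted = result.images[0]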
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort a collection in ascending order with a generator-based merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            # Repeatedly yield the smaller head element, then drain whichever
            # side still has items left.
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between volume units, going through cubic meters.

    >>> volume_conversion(4, "cubicmeter", "litre")
    4000
    >>> volume_conversion(1, "kilolitre", "cubicmeter")
    1
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
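    # Quick demo (values follow directly from the conversion table above):
    print(volume_conversion(4, "cubicmeter", "litre"))  # 4000
    print(volume_conversion(1, "kilolitre", "cubicmeter"))  # 1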
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class to store the configuration of a Dinat model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
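
# Minimal sketch (uses only the defaults defined above): the channel dimension
# doubles per stage, so hidden_size ends at embed_dim * 2 ** (num_stages - 1).
#
#   config = DinatConfig()
#   assert config.hidden_size == 64 * 2 ** 3  # == 512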
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered ways to reach `target` summing (reusable) items of `array`,
    via naive recursion.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoization over the remaining target.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed iteratively (bottom-up dynamic programming).

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
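    # Expected: 9 ordered ways to reach 5 from {1, 2, 5}:
    # 5; 1+2+2 (3 orders); 1+1+1+2 (4 orders); 1+1+1+1+1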
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Return the least row length n for which the fill-count function (the number
    of ways to place blocks of at least `min_block_length`) first exceeds one
    million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
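
# Hedged note: these tests are meant to be run with pytest from the datasets
# repository root (the `mockfs` fixture comes from its conftest), e.g.:
#   python -m pytest tests/test_search.py -k "faiss or elasticsearch"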
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : str=None ):
"""simple docstring"""
if attention_mask is None:
a__ : Optional[int] =tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
_lowercase : List[str] = OPTConfig
_lowercase : int = {}
_lowercase : Optional[int] = """gelu"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=9_9 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=2_0 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=1_6 , lowerCAmelCase__=1_6 , ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple =parent
a__ : List[str] =batch_size
a__ : Tuple =seq_length
a__ : Dict =is_training
a__ : Tuple =use_labels
a__ : Tuple =vocab_size
a__ : Optional[int] =hidden_size
a__ : Union[str, Any] =num_hidden_layers
a__ : Optional[int] =num_attention_heads
a__ : int =intermediate_size
a__ : Any =hidden_act
a__ : int =hidden_dropout_prob
a__ : List[Any] =attention_probs_dropout_prob
a__ : List[Any] =max_position_embeddings
a__ : Optional[Any] =eos_token_id
a__ : List[str] =pad_token_id
a__ : Optional[Any] =bos_token_id
a__ : Tuple =embed_dim
a__ : Optional[Any] =word_embed_proj_dim
a__ : Union[str, Any] =False
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Tuple =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a__ : Dict =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a__ : int =tf.concat([input_ids, eos_tensor] , axis=1 )
a__ : Tuple =self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase__ , **self.config_updates , )
a__ : Tuple =prepare_opt_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
a__ : List[str] =TFOPTModel(config=lowerCAmelCase__ )
a__ : Optional[Any] =inputs_dict["input_ids"]
a__ : List[str] =input_ids[:1, :]
a__ : List[str] =inputs_dict["attention_mask"][:1, :]
a__ : Union[str, Any] =1
# first forward pass
a__ : Union[str, Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
a__ , a__ : Union[str, Any] =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a__ : Tuple =ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ : Union[str, Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a__ : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 )
a__ : Optional[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a__ : Optional[int] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
a__ : int =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a__ : int =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a__ : str =output_from_no_past[:, -3:, random_slice_idx]
a__ : Optional[Any] =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : str = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_lowercase : Dict = (TFOPTForCausalLM,) if is_tf_available() else ()
_lowercase : Union[str, Any] = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
_lowercase : Any = False
_lowercase : str = False
_lowercase : Any = False
_lowercase : Union[str, Any] = 10
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[str] =TFOPTModelTester(self )
a__ : Optional[Any] =ConfigTester(self , config_class=lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ , a__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase__ , "weight" ):
return embedding_layer.weight
else:
return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 148
|
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) form a twin prime pair, else -1.
    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148
| 1
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) checkpoint dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a Megatron-LM fused QKV weight/bias into the layout expected by transformers."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 state dict into a transformers GPT2LMHeadModel state dict."""
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
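    # Example: the regex above maps "layers.3.self_attention.dense.weight" to the groups
    # ("3", "self_attention.dense", "weight"), i.e. layer index, operation name, weight-or-bias.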
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer.
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For the LM head, transformers ties the weight matrix to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
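# Shape walk-through (illustrative numbers only): with n_head=16 and head_dim=64 (D=1024),
# Megatron's fused QKV weight arrives as [3*D, D]; fix_query_key_value_ordering regroups its rows
# so the query/key/value split is outermost, and the transpose above then stores it as the
# [D, 3*D] Conv1D weight that GPT2LMHeadModel's c_attn expects.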
def main():
    """Run the Megatron-LM -> transformers GPT-2 checkpoint conversion from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)", )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 348
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Stores the (optionally learned) CLIP embeddings used as the unconditional input for classifier-free sampling."""
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings, ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
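    # Registering all sub-models lets DiffusionPipeline handle device placement, saving and
    # loading for the whole VQ-Diffusion stack in one place.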
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
@torch.no_grad()
    def __call__(self, prompt, num_inference_steps=100, guidance_scale=5.0, truncation_rate=1.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
            if do_classifier_free_guidance:
                # the batch was doubled (unconditional + conditional); split, apply the guidance
                # formula in log space, then renormalize with logsumexp
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
            model_output = self.truncate(model_output, truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
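    # `truncate` implements VQ-Diffusion's truncation sampling: per latent pixel it keeps only
    # the most probable codebook entries whose cumulative probability stays below
    # `truncation_rate`, pushing everything else to log(0) = -inf before the scheduler samples.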
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
| 348
| 1
|
"""simple docstring"""
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 313
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
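    # Deep-copying each attribute keeps mutable, dict-valued fields independent between the
    # original config and its copy, so mutating one cannot silently affect the other.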
| 313
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCAmelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    """Convert a PIL image (or list of images) to a normalized torch tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # img2img-style schedule: `strength` selects how much of the diffusion trajectory to re-run
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
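    # `prepare_latents` below noises the clean image to the selected start timestep, so the
    # denoising loop only has to traverse the tail of the schedule chosen by `strength`.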
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # noise the clean latents to the chosen start timestep
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__(self, image=None, strength=0.8, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True, ):
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator, ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
| 108
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead', FutureWarning, )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, image, mask_image, num_inference_steps=250, eta=0.0, jump_length=10, jump_n_sample=10, generator=None, output_type='pil', return_dict=True, ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
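        # RePaint alternates denoising with "time travel": whenever the precomputed timestep
        # sequence jumps backwards, scheduler.undo_step re-noises the image so the inpainted
        # region can be re-denoised and harmonized with the known pixels.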
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == 'pil':
            image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 218
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'  # composed ("a" + combining tilde) vs. precomposed "ã"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark)
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # '\u0085', # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 356
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
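    # The next test subsamples the schedule to 25 steps; consecutive entries of `timesteps` are
    # then not adjacent training steps, so each scheduler.step call passes an explicit
    # prev_timestep.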
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
def A_ ( self ):
pass
def A_ ( self ):
pass
| 12
| 0
|
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"num_attention_heads": "num_encoder_attention_heads",
}
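    # Attribute map: reads and writes of `num_attention_heads` on this config are transparently
    # routed to `num_encoder_attention_heads`.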
    def __init__(self, activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024, encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16, decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16, attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02, is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )
@property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
| 254
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
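    # different_colour_ways_number[n][c] counts row fillings of length n that use at least one
    # tile of colour c (tile lengths 2/3/4 for red/green/blue, as in Project Euler problem 116);
    # the three per-colour counts are summed at the end.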
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 234
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
| 352
|
'''simple docstring'''
import math
def proth(number: int) -> int:
    """
    Return the n-th Proth number (3, 5, 9, 13, 17, 25, ...).
    >>> proth(1)
    3
    >>> proth(2)
    5
    """
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 2^0, +1 to start the sequence at the 3rd Proth number
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 190
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowerCAmelCase = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
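# Note (editorial): the fixed seed plus the loose 1e-2 tolerance on a small
# pixel slice is the usual way these integration tests stay robust to minor
# nondeterminism across GPU kernels and driver versions.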
| 37
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 37
| 1
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCamelCase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
def __init__(self : Union[str, Any] , _A : List[str]=None , **_A : Tuple) -> Tuple:
super().__init__(features=_A)
__snake_case : Optional[int] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _lowercase (self : Tuple , _A : int) -> Any:
import torch
if isinstance(_A , _A) and column:
if all(
isinstance(_A , torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(_A)
return column
def _lowercase (self : Dict , _A : int) -> Optional[Any]:
import torch
if isinstance(_A , (str, bytes, type(_A))):
return value
elif isinstance(_A , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
__snake_case : Any = {}
if isinstance(_A , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
__snake_case : Any = {'dtype': torch.intaa}
elif isinstance(_A , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
__snake_case : List[str] = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A , PIL.Image.Image):
__snake_case : Any = np.asarray(_A)
return torch.tensor(_A , **{**default_dtype, **self.torch_tensor_kwargs})
def _lowercase (self : Union[str, Any] , _A : List[Any]) -> Union[str, Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(_A , '__array__') and not isinstance(_A , torch.Tensor):
__snake_case : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A , np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_A) for substruct in data_struct])
elif isinstance(_A , (list, tuple)):
return self._consolidate([self.recursive_tensorize(_A) for substruct in data_struct])
return self._tensorize(_A)
def _lowercase (self : Union[str, Any] , _A : dict) -> Optional[int]:
return map_nested(self._recursive_tensorize , _A , map_list=_A)
def _lowercase (self : Optional[Any] , _A : pa.Table) -> Mapping:
__snake_case : Optional[int] = self.numpy_arrow_extractor().extract_row(_A)
__snake_case : str = self.python_features_decoder.decode_row(_A)
return self.recursive_tensorize(_A)
def _lowercase (self : int , _A : pa.Table) -> "torch.Tensor":
__snake_case : List[str] = self.numpy_arrow_extractor().extract_column(_A)
__snake_case : Dict = self.python_features_decoder.decode_column(_A , pa_table.column_names[0])
__snake_case : Any = self.recursive_tensorize(_A)
__snake_case : Optional[int] = self._consolidate(_A)
return column
def _lowercase (self : Dict , _A : pa.Table) -> Mapping:
__snake_case : Tuple = self.numpy_arrow_extractor().extract_batch(_A)
__snake_case : str = self.python_features_decoder.decode_batch(_A)
__snake_case : Dict = self.recursive_tensorize(_A)
for column_name in batch:
__snake_case : Optional[Any] = self._consolidate(batch[column_name])
return batch
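

if __name__ == "__main__":
    # Minimal usage sketch (editorial): this formatter is normally selected
    # indirectly through `Dataset.set_format("torch")`, never instantiated
    # by hand.
    import datasets

    ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
    ds.set_format("torch")
    print(ds[0]["x"])  # tensor([1., 2.])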
| 95
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_a : int= NewType("DataClass", Any)
_a : Dict= NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
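

if __name__ == "__main__":
    # Minimal usage sketch (editorial; the dataclass below is hypothetical and
    # exists only to show how fields become argparse arguments).
    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = HfArg(default=1e-4, help="Peak learning rate.")
        use_fp16: bool = False

    (example_args,) = HfArgumentParser(ExampleArguments).parse_args_into_dataclasses(
        args=["--learning_rate", "3e-5", "--use_fp16", "true"]
    )
    print(example_args)  # ExampleArguments(learning_rate=3e-05, use_fp16=True)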
| 95
| 1
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursive Strassen multiplication for square matrices whose size is a
    power of 2; the 2x2 case falls back to the direct product.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a)[1] != matrix_dimensions(matrix_b)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix_a}\n"
            f"Matrix B: {matrix_b}"
        )
        raise Exception(msg)
    dimension_a = matrix_dimensions(matrix_a)
    dimension_b = matrix_dimensions(matrix_b)

    # Kept as in the source: square inputs are returned unmultiplied here.
    if dimension_a[0] == dimension_a[1] and dimension_b[0] == dimension_b[1]:
        return [matrix_a, matrix_b]

    maximum = max(*dimension_a, *dimension_b)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix_a = matrix_a
    new_matrix_b = matrix_b

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension_a[0]:
            for _ in range(dimension_a[1], maxim):
                new_matrix_a[i].append(0)
        else:
            new_matrix_a.append([0] * maxim)
        if i < dimension_b[0]:
            for _ in range(dimension_b[1], maxim):
                new_matrix_b[i].append(0)
        else:
            new_matrix_b.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix_a, new_matrix_b)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension_a[0]:
            for _ in range(dimension_b[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
lowerCamelCase : Any = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCamelCase : int = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
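    # Editorial cross-check against a naive O(n^3) product on a fresh pair of
    # matrices (note that `strassen` pads its inputs in place, so the matrices
    # above should not be reused after the call).
    lhs = [[1, 2], [3, 4], [5, 6]]
    rhs = [[7, 8, 9, 10], [11, 12, 13, 14]]
    expected = [[sum(x * y for x, y in zip(row, col)) for col in zip(*rhs)] for row in lhs]
    assert strassen(lhs, rhs) == expected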
| 124
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    # frequencies[p] counts how many distinct right triangles have perimeter p;
    # primitive triples come from Euclid's formula over coprime (m, n) of
    # opposite parity, and every multiple of a primitive perimeter is counted.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
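# Note (editorial): `_LazyModule` replaces this module in `sys.modules`, so the
# submodules listed in `_import_structure` are only imported on first attribute
# access; that keeps `import transformers` cheap even when torch/vision extras
# are installed.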
| 151
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
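# Note (editorial): the height/width recurrence in `create_and_check_model`
# above is the standard convolution output-size formula,
# floor((x + 2 * padding - kernel) / stride + 1), applied once per CvT stage.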
| 100
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__magic_name__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
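# Note (editorial): this is the usual transformers deprecation shim: the old
# `*FeatureExtractor` name is kept as an empty subclass of the new
# `*ImageProcessor`, so existing imports keep working while emitting a warning.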
| 100
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task: 1 means "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
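# A minimal invocation sketch (editorial; the flags below are fields of the
# dataclasses above plus standard TrainingArguments):
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-out \
#       --do_train --do_eval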
| 365
|
from __future__ import annotations


def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Calculate the net present value of a series of cash flows at the given
    discount rate (e.g. 0.1 for 10%).
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
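    # Worked example (editorial): an outlay of 1000 followed by two inflows of
    # 600 at a 10% discount rate gives -1000 + 600/1.1 + 600/1.21, about 41.32.
    print(present_value(0.10, [-1000, 600, 600]))  # 41.32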
| 276
| 0
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=16 , __magic_name__=13 , __magic_name__=7 , __magic_name__=14 , __magic_name__=10 , __magic_name__=19 , __magic_name__=5 , __magic_name__=4 , __magic_name__=True , __magic_name__=16 , __magic_name__=2 , __magic_name__=4 , __magic_name__=4 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=[1, 2, 3, 4, 5] , __magic_name__=25 , __magic_name__=5 , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = d_model
snake_case_ : Optional[Any] = parent
snake_case_ : List[Any] = batch_size
snake_case_ : Dict = prediction_length
snake_case_ : Optional[Any] = context_length
snake_case_ : Any = cardinality
snake_case_ : int = num_time_features
snake_case_ : Optional[int] = lags_sequence
snake_case_ : Optional[Any] = embedding_dimension
snake_case_ : Any = is_training
snake_case_ : List[Any] = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : str = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : List[str] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : str = context_length
snake_case_ : Tuple = prediction_length + label_length
snake_case_ : int = label_length
snake_case_ : Optional[int] = moving_average
snake_case_ : Optional[int] = autocorrelation_factor
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = config.context_length + max(config.lags_sequence )
snake_case_ : List[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case_ : Tuple = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length] )
snake_case_ : int = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case_ : int = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_config()
snake_case_ : Dict = self.prepare_autoformer_inputs_dict(__magic_name__ )
return config, inputs_dict
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AutoformerModel(config=__magic_name__ ).to(__magic_name__ ).eval()
snake_case_ : Union[str, Any] = model(**__magic_name__ )
snake_case_ : Optional[Any] = outputs.encoder_last_hidden_state
snake_case_ : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = model.get_encoder()
encoder.save_pretrained(__magic_name__ )
snake_case_ : Optional[Any] = AutoformerEncoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = model.create_network_inputs(**__magic_name__ )
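        # Autoformer decomposes the context window into seasonal and trend parts;
        # the encoder consumes the seasonal component concatenated with the time
        # features, while the trend component is re-injected on the decoder side.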
snake_case_ , snake_case_ : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
snake_case_ : Any = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
snake_case_ : str = encoder(inputs_embeds=__magic_name__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
snake_case_ : Tuple = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
snake_case_ : Union[str, Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
snake_case_ : List[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
snake_case_ : Optional[Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : int = model.get_decoder()
decoder.save_pretrained(__magic_name__ )
snake_case_ : List[Any] = AutoformerDecoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
snake_case_ : Any = decoder(
trend=__magic_name__ , inputs_embeds=__magic_name__ , encoder_hidden_states=__magic_name__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCamelCase_ : str = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCamelCase_ : Dict = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : str = False
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Tuple = False
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = AutoformerModelTester(self )
snake_case_ : List[str] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
snake_case_ , snake_case_ : str = model_class.from_pretrained(__magic_name__ , output_loading_info=__magic_name__ )
self.assertEqual(info['''missing_keys'''] , [] )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__magic_name__ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = inspect.signature(getattr(__magic_name__ , '''forward''' ) )
# The main input is the name of the argument after `self`
snake_case_ : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __magic_name__ )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[int] = model_class(__magic_name__ )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Union[str, Any] = [*signature.parameters.keys()]
snake_case_ : Tuple = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(__magic_name__ )] , __magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = True
snake_case_ : Union[str, Any] = getattr(self.model_tester , '''seq_length''' , __magic_name__ )
snake_case_ : Optional[int] = getattr(self.model_tester , '''decoder_seq_length''' , __magic_name__ )
snake_case_ : Optional[Any] = getattr(self.model_tester , '''encoder_seq_length''' , __magic_name__ )
snake_case_ : Tuple = getattr(self.model_tester , '''d_model''' , __magic_name__ )
snake_case_ : List[str] = getattr(self.model_tester , '''num_attention_heads''' , __magic_name__ )
snake_case_ : int = d_model // num_attention_heads
for model_class in self.all_model_classes:
snake_case_ : Dict = True
snake_case_ : str = False
snake_case_ : Any = True
snake_case_ : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
snake_case_ : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
snake_case_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : str = True
snake_case_ : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
snake_case_ : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
snake_case_ : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
snake_case_ : List[str] = len(__magic_name__ )
snake_case_ : Union[str, Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__magic_name__ , __magic_name__ )
# decoder attentions
snake_case_ : Any = outputs.decoder_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
snake_case_ : List[Any] = outputs.cross_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
snake_case_ : Any = True
snake_case_ : List[Any] = True
snake_case_ : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 2 , len(__magic_name__ ) )
snake_case_ : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase (self ) -> str:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch ( filename="train-batch.pt" ):
    """Download a cached batch of the tourism-monthly dataset and load it onto the test device."""
    file = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=filename , repo_type='''dataset''' )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__magic_name__ )
snake_case_ : Union[str, Any] = prepare_batch()
with torch.no_grad():
snake_case_ : str = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
snake_case_ : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __magic_name__ )
snake_case_ : Optional[int] = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__magic_name__ )
snake_case_ : Tuple = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
snake_case_ : Optional[Any] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __magic_name__ )
snake_case_ : str = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : str = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__magic_name__ )
snake_case_ : Tuple = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
snake_case_ : Optional[int] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
snake_case_ : Any = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __magic_name__ )
snake_case_ : Optional[Any] = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=__magic_name__ )
snake_case_ : Any = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __magic_name__ , rtol=1e-1 ) )
| 279
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 279
| 1
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
def __init__( self , A , A = True , A = None , A = 32 , A = True , A = 1 / 255 , A = True , A = True , A = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , A = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , A = True , A=7 , A=30 , A=400 , A=3 , ) -> str:
UpperCAmelCase : Dict = parent
UpperCAmelCase : Union[str, Any] = do_resize
UpperCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 288}
UpperCAmelCase : List[str] = size_divisor
UpperCAmelCase : List[str] = do_rescale
UpperCAmelCase : Dict = rescale_factor
UpperCAmelCase : Any = do_normalize
UpperCAmelCase : List[Any] = do_center_crop
UpperCAmelCase : Optional[int] = image_mean
UpperCAmelCase : List[str] = image_std
UpperCAmelCase : List[str] = do_pad
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : str = min_resolution
UpperCAmelCase : List[Any] = max_resolution
def _lowercase( self ) -> int:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _lowercase( self , A , A=False ) -> Tuple:
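        # Mirrors the processor's resize logic: scale the shortest edge to `size`,
        # cap the longer side at (1333 / 800) * size, then floor both dimensions
        # to the nearest multiple of `size_divisor`.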
if not batched:
UpperCAmelCase : Dict = self.size["""shortest_edge"""]
UpperCAmelCase : Any = image_inputs[0]
if isinstance(A , Image.Image ):
UpperCAmelCase : Union[str, Any] = image.size
else:
UpperCAmelCase : Optional[Any] = image.shape[1], image.shape[2]
UpperCAmelCase : List[Any] = size / min(A , A )
if h < w:
UpperCAmelCase : List[str] = size, scale * w
else:
UpperCAmelCase : List[Any] = scale * h, size
UpperCAmelCase : str = int((1333 / 800) * size )
if max(A , A ) > max_size:
UpperCAmelCase : int = max_size / max(A , A )
UpperCAmelCase : Union[str, Any] = newh * scale
UpperCAmelCase : Dict = neww * scale
UpperCAmelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase : List[str] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase : Tuple = []
for image in image_inputs:
UpperCAmelCase : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase : Optional[int] = max(A , key=lambda A : item[0] )[0]
UpperCAmelCase : Optional[Any] = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = BridgeTowerImageProcessor if is_vision_available() else None
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = BridgeTowerImageProcessingTester(self )
@property
def _lowercase( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """image_mean""" ) )
self.assertTrue(hasattr(A , """image_std""" ) )
self.assertTrue(hasattr(A , """do_normalize""" ) )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
self.assertTrue(hasattr(A , """size_divisor""" ) )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> str:
# Initialize image processor
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase : int = image_processing(A , return_tensors="""pt""" ).pixel_values
UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase( self ) -> Union[str, Any]:
# Initialize image processor
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase : Union[str, Any] = image_processing(A , return_tensors="""pt""" ).pixel_values
UpperCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase( self ) -> List[str]:
# Initialize image processor
UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase : Union[str, Any] = image_processing(A , return_tensors="""pt""" ).pixel_values
UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 367
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : str = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'levit'
def __init__( self , A=224 , A=3 , A=3 , A=2 , A=1 , A=16 , A=[128, 256, 384] , A=[4, 8, 12] , A=[4, 4, 4] , A=[16, 16, 16] , A=0 , A=[2, 2, 2] , A=[2, 2, 2] , A=0.0_2 , **A , ) -> int:
super().__init__(**A )
UpperCAmelCase : Any = image_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : Tuple = kernel_size
UpperCAmelCase : Optional[int] = stride
UpperCAmelCase : Dict = padding
UpperCAmelCase : List[Any] = hidden_sizes
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Optional[int] = depths
UpperCAmelCase : Any = key_dim
UpperCAmelCase : str = drop_path_rate
UpperCAmelCase : List[Any] = patch_size
UpperCAmelCase : str = attention_ratio
UpperCAmelCase : Optional[Any] = mlp_ratio
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : int = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCamelCase_ ( __magic_name__ ):
lowercase = version.parse('1.11' )
@property
def _lowercase( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowercase( self ) -> float:
return 1e-4
| 338
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]=7 , lowerCamelCase : Dict=3 , lowerCamelCase : List[Any]=18 , lowerCamelCase : Any=30 , lowerCamelCase : Union[str, Any]=4_00 , lowerCamelCase : Any=True , lowerCamelCase : Any=None , lowerCamelCase : Tuple=True , ) -> Optional[int]:
lowerCAmelCase_ : int = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : Any = image_size
lowerCAmelCase_ : List[Any] = min_resolution
lowerCAmelCase_ : Union[str, Any] = max_resolution
lowerCAmelCase_ : Tuple = do_resize
lowerCAmelCase_ : Optional[int] = size
lowerCAmelCase_ : Optional[Any] = apply_ocr
def __lowercase ( self : int ) -> str:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __snake_case ( _SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowercase ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def __lowercase ( self : Union[str, Any] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : List[str] ) -> List[str]:
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """apply_ocr""" ) )
def __lowercase ( self : Tuple ) -> Dict:
lowerCAmelCase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowerCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __lowercase ( self : Any ) -> Tuple:
pass
def __lowercase ( self : Tuple ) -> Any:
# Initialize image_processing
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , lowerCamelCase )
self.assertIsInstance(encoding.boxes , lowerCamelCase )
# Test batched
lowerCAmelCase_ : Any = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : int ) -> List[str]:
# Initialize image_processing
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : Optional[int] ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Optional[Any] = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : Any ) -> Union[str, Any]:
# with apply_OCR = True
lowerCAmelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase_ : int = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
lowerCAmelCase_ : int = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
lowerCAmelCase_ : Dict = image_processing(lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase_ : str = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        lowerCAmelCase_ : Optional[int] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase )
self.assertListEqual(encoding.boxes , lowerCamelCase )
# with apply_OCR = False
lowerCAmelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase )
lowerCAmelCase_ : Any = image_processing(lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 120
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__A : Dict = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__A : List[Any] = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n    >>> recall_metric = datasets.load_metric('recall')\n    >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n    >>> print(results)\n    {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n    >>> recall_metric = datasets.load_metric('recall')\n    >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n    >>> print(results)\n    {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n    >>> recall_metric = datasets.load_metric('recall')\n    >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n    >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n    >>> print(results)\n    {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n    >>> recall_metric = datasets.load_metric('recall')\n    >>> predictions = [0, 2, 1, 0, 0, 1]\n    >>> references = [0, 1, 2, 0, 1, 2]\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n    >>> print(results)\n    {'recall': 0.3333333333333333}\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n    >>> print(results)\n    {'recall': 0.3333333333333333}\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n    >>> print(results)\n    {'recall': 0.3333333333333333}\n    >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n    >>> print(results)\n    {'recall': array([1., 0., 0.])}\n"
__A : str = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
"""simple docstring"""
def __lowercase ( self : str ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def __lowercase ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[int]=1 , lowerCamelCase : Union[str, Any]="binary" , lowerCamelCase : Any=None , lowerCamelCase : str="warn" , ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = recall_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase , zero_division=lowerCamelCase , )
return {"recall": float(lowerCamelCase ) if score.size == 1 else score}
| 120
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCamelCase_ ( a_ , a_ ):
SCREAMING_SNAKE_CASE_ = 'pixel_values'
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = TimmBackboneConfig
def __init__( self : Optional[Any] ,__lowerCamelCase : str ,**__lowerCamelCase : Any ):
'''simple docstring'''
requires_backends(self ,'''timm''' )
super().__init__(__lowerCamelCase )
a = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(__lowerCamelCase ,'''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
a = getattr(__lowerCamelCase ,'''use_pretrained_backbone''' ,__lowerCamelCase )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
a = config.out_indices if getattr(__lowerCamelCase ,'''out_indices''' ,__lowerCamelCase ) is not None else (-1,)
a = timm.create_model(
config.backbone ,pretrained=__lowerCamelCase ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=__lowerCamelCase ,**__lowerCamelCase ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a = self._backbone.return_layers
a = {layer['''module''']: str(__lowerCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__lowerCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ,__lowerCamelCase : Optional[Any] ,*__lowerCamelCase : List[Any] ,**__lowerCamelCase : int ):
'''simple docstring'''
requires_backends(cls ,['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
a = kwargs.pop('''config''' ,TimmBackboneConfig() )
a = kwargs.pop('''use_timm_backbone''' ,__lowerCamelCase )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
a = kwargs.pop('''num_channels''' ,config.num_channels )
a = kwargs.pop('''features_only''' ,config.features_only )
a = kwargs.pop('''use_pretrained_backbone''' ,config.use_pretrained_backbone )
a = kwargs.pop('''out_indices''' ,config.out_indices )
a = TimmBackboneConfig(
backbone=__lowerCamelCase ,num_channels=__lowerCamelCase ,features_only=__lowerCamelCase ,use_pretrained_backbone=__lowerCamelCase ,out_indices=__lowerCamelCase ,)
return super()._from_config(__lowerCamelCase ,**__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : List[Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Dict ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=None ,**__lowerCamelCase : Dict ):
'''simple docstring'''
a = return_dict if return_dict is not None else self.config.use_return_dict
a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a = self._all_layers
a = self._backbone(__lowerCamelCase ,**__lowerCamelCase )
a = self._return_layers
a = tuple(hidden_states[i] for i in self.out_indices )
else:
a = self._backbone(__lowerCamelCase ,**__lowerCamelCase )
a = None
a = tuple(__lowerCamelCase )
a = tuple(__lowerCamelCase ) if hidden_states is not None else None
if not return_dict:
a = (feature_maps,)
if output_hidden_states:
a = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__lowerCamelCase ,hidden_states=__lowerCamelCase ,attentions=__lowerCamelCase )
| 330
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
UpperCamelCase__ : Optional[Any] = """bert-base-cased"""
UpperCamelCase__ : int = """fp16"""
UpperCamelCase__ : str = """bf16"""
UpperCamelCase__ : List[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCamelCase_ ( a_ ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
super().setUp()
a = dict(
ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__lowerCamelCase ):
a = self.dist_env.copy()
a = F"""{i + 1}"""
a = strategy
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__lowerCamelCase ):
a = self.dist_env.copy()
a = prefetch_policy
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__lowerCamelCase ):
a = self.dist_env.copy()
a = state_dict_type
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = AutoModel.from_pretrained(__lowerCamelCase )
for policy in FSDP_AUTO_WRAP_POLICY:
a = self.dist_env.copy()
a = policy
if policy == "TRANSFORMER_BASED_WRAP":
a = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
a = '''2000'''
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
a = self.dist_env.copy()
a = '''TRANSFORMER_BASED_WRAP'''
a = '''T5Layer'''
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
with self.assertRaises(__lowerCamelCase ) as cm:
fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
a = self.dist_env.copy()
a = '''SIZE_BASED_WRAP'''
a = '''0'''
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
a = self.dist_env.copy()
a = mp_dtype
with mockenv_context(**__lowerCamelCase ):
a = Accelerator()
if mp_dtype == "fp16":
a = torch.floataa
elif mp_dtype == "bf16":
a = torch.bfloataa
a = MixedPrecision(param_dtype=__lowerCamelCase ,reduce_dtype=__lowerCamelCase ,buffer_dtype=__lowerCamelCase )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCamelCase )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler ,__lowerCamelCase ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
a = self.dist_env.copy()
a = str(__lowerCamelCase ).lower()
with mockenv_context(**__lowerCamelCase ):
a = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCamelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCamelCase_ ( a_ ):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
a = 0.82
a = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
a = {
'''multi_gpu_fp16''': 32_00,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
a = 1_60
a = 1_60
a = inspect.getfile(accelerate.test_utils )
a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
a = os.path.join(self.test_scripts_folder ,'''test_performance.py''' )
a = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
a = cmd.copy()
for i, strategy in enumerate(__lowerCamelCase ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' )
a = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(__lowerCamelCase ):
a = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
a = len(__lowerCamelCase )
for state_dict_type in FSDP_STATE_DICT_TYPE:
a = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
a = cmd_config[:-1]
a = os.path.join(self.tmpdir ,'''epoch_0''' )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        # mask out padding so attention ignores pad positions
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
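# e.g. _long_tensor([[0, 31414, 2]]) -> tf.Tensor of shape (1, 3) with dtype int32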
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
@property
    def prompts(self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
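# Worked example: downscale_height_and_width(768, 768) with the default scale_factor=8 returns (96, 96),
# the spatial size used for the UNet latents; a non-multiple request such as 500 is rounded up
# (500 // 64 == 7, remainder != 0 -> 8), so the call returns 64 rather than undershooting the request.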
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
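    # Sketch of intended use: after `pipe.enable_model_cpu_offload()`, each registered submodule stays
    # on the CPU and is moved to `cuda:{gpu_id}` only while its forward pass runs, trading speed for memory.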
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
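# A minimal usage sketch (the column names here are hypothetical):
#   features = Features({"file": Audio(sampling_rate=16_000), "text": Value("string")})
#   task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#   task = task.align_with_features(features)  # input_schema now carries the dataset's own Audio feature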
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key's letters until the key is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt by shifting each letter back by the key letter: C = (P - K) mod 26; spaces pass through."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by reversing the shift: P = (C + K + 26) mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
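# Round-trip check (values follow from the functions above):
#   key_new = generate_key("THE GERMAN ATTACK", "SECRET")   # "SECRET" repeated out to the message length
#   s = cipher_text("THE GERMAN ATTACK", key_new)
#   original_text(s, key_new) == "THE GERMAN ATTACK"        # True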
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
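# Usage sketch (checkpoint name is illustrative):
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224]) with the default resize/crop settings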
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
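# prepare_video() yields the clip as a list of per-frame numpy arrays; the integration test below
# feeds only the first 8 frames (video[:8]) through VideoMAEImageProcessor before the forward pass.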
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
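# Quick shape check for the basic residual block (values are illustrative):
#   layer = ResNetBasicLayer(in_channels=64, out_channels=128, stride=2)
#   layer(torch.randn(1, 64, 56, 56)).shape  # torch.Size([1, 128, 28, 28])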
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
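# Minimal usage sketch (hedged): the checkpoint name below is illustrative and the
# call assumes the standard `transformers` API for ResNet image classification.
#
# from PIL import Image
# import torch
# from transformers import AutoImageProcessor, ResNetForImageClassification
#
# processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
# model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
# inputs = processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])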
| 159
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert the cached features for example `i` into model-ready tensors.
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
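# Minimal usage sketch (hedged): the tokenizer checkpoint and data path below are
# illustrative only.
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
# dataset = SquadDataset(args, tokenizer, mode="train")
# batch = dataset[0]  # dict with input_ids, attention_mask, start/end positions, ...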
| 159
| 1
|
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Clearing the lowest set bit (`number &= number - 1`) jumps straight to the
        # next 1, so the loop runs once per set bit instead of once per bit position.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
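# Worked example (illustrative): 13 is 0b1101, which has three set bits, so the
# loop body runs exactly three times.
# get_set_bits_count(13) -> 3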
| 156
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each bucket is a deque; colliding values are chained at the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
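# Hypothetical usage sketch: assumes the sibling `hash_table.HashTable` exposes
# `insert_data`, as in the usual chained-hash-table layout this class extends.
#
# ht = HashTableWithLinkedList(3)
# for value in (17, 18, 99):
#     ht.insert_data(value)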
| 156
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 20
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
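# How the lazy pattern behaves (illustrative): importing this package is cheap
# because the heavy modeling submodules load only on first attribute access.
#
# from transformers.models import rag  # fast: no torch/tf modeling code imported yet
# rag.RagModel                         # triggers the real import of modeling_rag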
| 56
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
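# Minimal usage sketch (hedged): with the defaults above, the shortest edge is
# resized to 224 and the image center-cropped to 224x224, so any input size yields
# the same output shape.
#
# from PIL import Image
# processor = CLIPImageProcessor()
# batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224)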
| 186
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
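# Example (illustrative): build a DPR encoder config with a 128-dim projection head.
#
# config = DPRConfig(projection_dim=128)
# config.projection_dim  # 128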
| 133
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(text: str) -> str:
    text = re.sub("<n>", "", text)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text))
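# Example (illustrative):
# add_newline_to_end_of_each_sentence("Hello world. How are you?")
# -> "Hello world.\nHow are you?"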
| 133
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            # Iterating raises ContainsLoopError as soon as a node repeats.
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
__A : Dict = Node(1)
__A : Optional[Any] = Node(2)
__A : int = Node(3)
__A : Optional[Any] = Node(4)
print(root_node.has_loop) # False
__A : Any = root_node.next_node
print(root_node.has_loop) # True
__A : List[Any] = Node(5)
__A : Dict = Node(6)
__A : str = Node(5)
__A : Dict = Node(6)
print(root_node.has_loop) # False
__A : Optional[int] = Node(1)
print(root_node.has_loop) # False
| 120
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowercase ( self : Any , lowerCamelCase : "datasets.SplitGenerator" , lowerCamelCase : str = "arrow" , lowerCamelCase : Optional[Union[str, int]] = None , lowerCamelCase : Optional[int] = None , **lowerCamelCase : List[str] , ) -> Optional[int]:
self._validate_cache_dir()
lowerCAmelCase_ : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = not is_remote_filesystem(self._fs )
lowerCAmelCase_ : Dict = os.path.join if is_local else posixpath.join
lowerCAmelCase_ : int = """-TTTTT-SSSSS-of-NNNNN"""
lowerCAmelCase_ : Any = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
lowerCAmelCase_ : List[str] = path_join(self._output_dir , lowerCamelCase )
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Dict = []
for task_id, content in self._prepare_split_single(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
(
(
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
),
) : str = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = total_num_examples
lowerCAmelCase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
lowerCAmelCase_ : Optional[int] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCAmelCase_ : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , ):
rename(
lowerCamelCase , fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace("""TTTTT-SSSSS""" , F'{global_shard_id:05d}' ).replace("""NNNNN""" , F'{total_shards:05d}' ) , )
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : str = 0
for i in range(len(lowerCamelCase ) ):
lowerCAmelCase_, lowerCAmelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase , len(lowerCamelCase ) ).map(lambda lowerCamelCase : _rename_shard(*lowerCamelCase ) ).collect()
else:
# don't use any pattern
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : str = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace(lowerCamelCase , """""" ) , )
def __lowercase ( self : Dict , lowerCamelCase : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
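# Typical entry point (hedged sketch): users normally reach this builder through
# `datasets.Dataset.from_spark(df)`, which constructs `Spark(df)` and runs the
# prepare steps above to materialize the Arrow shards.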
| 120
| 1
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """print in a multi-process-safe way, serialized on an exclusive lock of this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
rank = dist.get_rank()
world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
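# Aside: a single-node sketch of the flock-serialized printing pattern used by printflock,
# runnable without torch.distributed (the worker function and process count are arbitrary):
#
# import multiprocessing
#
# def worker(i):
#     printflock(f"hello from process {i}")
#
# if __name__ == "__main__":
#     procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(4)]
#     for p in procs:
#         p.start()
#     for p in procs:
#         p.join()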
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved pytorch state dict checkpoint to FP16, overwriting src_path unless save_path is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
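# Example invocation via python-fire (paths are hypothetical), assuming this script is
# saved as convert_to_fp16.py:
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place, as handled above.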
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
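# Worked examples for the conversions above (values chosen to come out round):
#
# molarity_to_normality(2, 4, 2) -> round((4 / 2) * 2) = 4
# moles_to_pressure(0.82, 3, 300) -> round((3 * 0.0821 * 300) / 0.82) = 90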
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
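# For intuition, a module-level __getattr__ (PEP 562) gives a similar lazy import on first
# attribute access. This is a simplified sketch of the idea, not the actual _LazyModule:
#
# import importlib
#
# def __getattr__(name):
#     for submodule, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module(f".{submodule}", __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")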
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression using Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
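# Trace for the example above, "(5 + ((4 * 2) * (2 + 3)))": each ")" folds the top operator,
# giving 4 * 2 -> 8, then 2 + 3 -> 5, then 8 * 5 -> 40, and finally 5 + 40 -> 45.
# Another quick check: dijkstras_two_stack_algorithm("(1 + (2 * 3))") returns 7.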
"""simple docstring"""
__A : Tuple = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : Union[str, Any] = frozenset(["prompt", "negative_prompt"])
__A : str = frozenset([])
__A : List[str] = frozenset(["image"])
__A : Optional[Any] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__A : Optional[int] = frozenset(["image"])
__A : Optional[int] = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"])
__A : str = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : List[str] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : List[Any] = frozenset(["image", "mask_image"])
__A : List[str] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Tuple = frozenset(["example_image", "image", "mask_image"])
__A : Dict = frozenset(["class_labels"])
__A : str = frozenset(["class_labels"])
__A : str = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : str = frozenset(["batch_size"])
__A : Optional[int] = frozenset([])
__A : Any = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : List[str] = frozenset(["prompt", "negative_prompt"])
__A : Tuple = frozenset(["input_tokens"])
__A : Optional[int] = frozenset(["input_tokens"])
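# The frozensets above enumerate, per pipeline type, which call arguments the shared tests
# may pass. A small sketch of the gating idea (the function and set contents here are
# illustrative, not from this file):
def filter_pipeline_kwargs(kwargs, allowed=frozenset(["prompt", "height", "width", "guidance_scale"])):
    """Drop any call kwargs that the pipeline under test does not accept."""
    return {k: v for k, v in kwargs.items() if k in allowed}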
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a 0/1 grid: returns (distance, path), or (np.inf, []) if unreachable."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
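# Quick illustrative check of the search above (1 marks walkable cells):
#
# grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
# dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
# # dist == 6.0; the path runs along the top row, down the right column, then left to (2, 0)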
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_electra_fast"] = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
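# Consumer side (sketch, assuming transformers is installed with the torch backend): the
# guarded structure above keeps these imports cheap until the attribute is first touched.
#
# from transformers import ElectraConfig   # resolves via the lazy module
# from transformers import ElectraModel    # imports modeling_electra on first access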
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSpeechSeq2Seq,
TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase ):
def __A ( self ):
A__ = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.float32 , )
A__ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
A__ = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.float32 , ) # expected non filtered values as noted above
output = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
A__ = output[output != -float("inf" )]
A__ = tf.cast(
tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float("inf" ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-1_2 )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowerCAmelCase : List[Any] = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def __A ( self ):
# TF-only test: tf.saved_model export
test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
input_length = 2
max_new_tokens = 2
class DummyModel(tf.Module ):
def __init__( self , model ):
super(DummyModel , self ).__init__()
self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.int32 , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.int32 , name="attention_mask" ),
) , jit_compile=True , )
def serving( self , input_ids , attention_mask ):
outputs = self.model.generate(
input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
return {"sequences": outputs["sequences"]}
dummy_input_ids = [[2, 0], [102, 103]]
dummy_attention_masks = [[1, 0], [1, 1]]
dummy_model = DummyModel(model=test_model )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
inputs = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
tf_func_outputs = serving_func(**inputs )["sequences"]
tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
def __A ( self ):
# TF-only test: tf.saved_model export
test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
batch_size = 1
max_new_tokens = 2
class DummyModel(tf.Module ):
def __init__( self , model ):
super(DummyModel , self ).__init__()
self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.int32 , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.int32 , name="attention_mask" ),
) , jit_compile=True , )
def serving( self , input_ids , attention_mask ):
outputs = self.model.generate(
input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
return {"sequences": outputs["sequences"]}
dummy_input_ids = [[2], [102, 103]]
dummy_attention_masks = [[1], [1, 1]]
dummy_model = DummyModel(model=test_model )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
for input_row in range(len(dummy_input_ids ) ):
inputs = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
tf_func_outputs = serving_func(**inputs )["sequences"]
tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
def __A ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=UpperCAmelCase__ )
class CompleteSentenceTransformer(tf.keras.layers.Layer ):
def __init__( self ):
super().__init__()
self.tokenizer = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , "spiece.model" ) , "rb" ).read() )
self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def __A ( self , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ):
A__ = self.tokenizer.tokenize(UpperCAmelCase__ )
A__ , A__ = text.pad_model_inputs(
UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
A__ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
return self.tokenizer.detokenize(UpperCAmelCase__ )
complete_model = CompleteSentenceTransformer()
A__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
A__ = complete_model(UpperCAmelCase__ )
keras_model = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
keras_model.save(UpperCAmelCase__ )
def __A ( self ):
# Has PT equivalent: this test relies on random sampling
A__ = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
expectation = 14
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A__ = "Hello, my dog is cute and"
A__ = tokenizer(UpperCAmelCase__ , return_tensors="tf" )
model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A__ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
generated_tokens = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
A__ = [638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
generated_tokens = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __A ( self ):
# Has PT equivalent: ample use of framework-specific code
bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
A__ = "Hugging Face is a technology company based in New York and Paris."
A__ = bart_tokenizer(UpperCAmelCase__ , return_tensors="tf" ).input_ids
bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
A__ = bart_model.generate(UpperCAmelCase__ ).numpy()
class FakeBart(TFBartForConditionalGeneration ):
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ):
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
A__ = bart_model.generate(UpperCAmelCase__ , foo="bar" ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
class FakeEncoder(bart_model.model.encoder.__class__ ):
def __A ( self , UpperCAmelCase__ , **UpperCAmelCase__ ):
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
A__ = bart_model.generate(UpperCAmelCase__ ).numpy()
with self.assertRaises(UpperCAmelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase__ , foo="bar" )
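# Standalone sketch of the helper exercised by the first test class above, using the same
# tf_top_k_top_p_filtering import; the toy logits are made up for illustration:
#
# import tensorflow as tf
# from transformers import tf_top_k_top_p_filtering
#
# logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
# filtered = tf_top_k_top_p_filtering(logits, top_k=2, top_p=1.0)
# # all entries outside the top-2 logits are now -inf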
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI, i.e. `schedule` events on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run into output_dir."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # keyword spelling matches the helper's signature in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
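# Hypothetical driver for the helpers above (the artifact name and token source are
# assumptions, not part of this file):
#
# import os
# results = get_last_daily_ci_reports(
#     artifact_names=["ci_results_run_models_gpu"],
#     output_dir="ci_reports",
#     token=os.environ["GITHUB_TOKEN"],
# )
# print(sorted(results))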