code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case ( repo_id: str , path: str , revision: Optional[str] = None ) -> str:
 """simple docstring"""
 if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
 # old versions of hfh don't url-encode the file path
 path = quote(path )
 return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
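A brief usage sketch for the URL helper above; the repo id and file path are hypothetical placeholders, and the printed URL shape depends on the installed huggingface_hub version:
if __name__ == "__main__":
 # hypothetical dataset repo and file; quoting only matters on old hfh releases
 print(snake_case('squad' , 'plain_text/train.parquet' , 'main' ) )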
| 161 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
 from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 from .modeling_longt5 import (
 LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
 LongT5EncoderModel,
 LongT5ForConditionalGeneration,
 LongT5Model,
 LongT5PreTrainedModel,
 )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 from .modeling_flax_longt5 import (
 FlaxLongT5ForConditionalGeneration,
 FlaxLongT5Model,
 FlaxLongT5PreTrainedModel,
 )
else:
import sys
 sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
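For context, a minimal self-contained sketch of the lazy-import idea that `_LazyModule` implements above; this is an illustrative simplification, not the transformers implementation:
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
 def __init__(self , name , import_structure ):
 super().__init__(name )
 # map each exported symbol to the submodule that defines it
 self._symbol_to_module = {
 sym: mod for mod, syms in import_structure.items() for sym in syms
 }
 def __getattr__(self , attr ):
 if attr not in self._symbol_to_module:
 raise AttributeError(F"module {self.__name__!r} has no attribute {attr!r}" )
 # import the owning submodule only on first attribute access
 module = importlib.import_module("." + self._symbol_to_module[attr] , self.__name__ )
 value = getattr(module , attr )
 setattr(self , attr , value )  # cache so later lookups skip __getattr__
 return value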
| 261 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
"""simple docstring"""
 n = 10
 features = datasets.Features(
 {
 '''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
 '''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
 '''answers''': datasets.Sequence(
 {
 '''text''': datasets.Value('''string''' ),
 '''answer_start''': datasets.Value('''int32''' ),
 } ),
 '''id''': datasets.Value('''int64''' ),
 } )
 dataset = datasets.Dataset.from_dict(
 {
 '''tokens''': [['''foo'''] * 5] * n,
 '''labels''': [[1] * 5] * n,
 '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
 '''id''': list(range(n ) ),
 } , features=features , )
return dataset
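A sketch of how a test might consume the session-scoped dataset fixture above; the parameter name below is a placeholder, since this row obscures the fixture's real pytest name:
def test_dataset_fixture_shape(dataset ):  # placeholder fixture name
 assert dataset.num_rows == 10
 assert dataset.features["labels"].feature.names == ["negative", "positive"]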
@pytest.fixture(scope='''session''' )
def snake_case_ ( dataset , tmp_path_factory ):
 """simple docstring"""
 filename = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
 dataset.map(cache_file_name=filename )
return filename
# FILE_CONTENT + files
_lowercase : Optional[Any] = """\
Text data.
Second line of data."""
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
lowercase_ : List[str] = FILE_CONTENT
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
 import bz2
lowercase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
lowercase_ : Tuple = bytes(__SCREAMING_SNAKE_CASE , '''utf-8''' )
 with bz2.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
import gzip
lowercase_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
lowercase_ : str = bytes(__SCREAMING_SNAKE_CASE , '''utf-8''' )
with gzip.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
 import lz4.frame
lowercase_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
lowercase_ : Dict = bytes(__SCREAMING_SNAKE_CASE , '''utf-8''' )
 with lz4.frame.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
 import py7zr
lowercase_ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
 with py7zr.SevenZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as archive:
archive.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import tarfile
lowercase_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
import lzma
lowercase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
lowercase_ : Any = bytes(__SCREAMING_SNAKE_CASE , '''utf-8''' )
with lzma.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
import zipfile
lowercase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase_ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
lowercase_ : Optional[int] = bytes(__SCREAMING_SNAKE_CASE , '''utf-8''' )
with zstd.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
lowercase_ : Optional[Any] = textwrap.dedent(
'''\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return filename
_lowercase : Union[str, Any] = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
_lowercase : Tuple = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
_lowercase : str = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
_lowercase : Optional[int] = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
_lowercase : List[Any] = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : str = datasets.Dataset.from_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
 with contextlib.closing(sqlite3.connect(__SCREAMING_SNAKE_CASE ) ) as con:
lowercase_ : int = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
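A sketch of a consumer for the SQLite fixture above; the fixture parameter name is a placeholder, and the table layout follows the CREATE TABLE statement in the fixture:
def test_sqlite_fixture_roundtrip(sqlite_path ):  # placeholder fixture name
 import sqlite3
 with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
 rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset" ).fetchall()
 assert rows[0] == ("0", 0, 0.0)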
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''''' ) as f:
lowercase_ : Dict = csv.DictWriter(__SCREAMING_SNAKE_CASE , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''''' ) as f:
lowercase_ : Dict = csv.DictWriter(__SCREAMING_SNAKE_CASE , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
 import bz2
lowercase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f:
lowercase_ : List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
 with bz2.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
lowercase_ : List[str] = pa.schema(
{
'''col_1''': pa.string(),
 '''col_2''': pa.int64(),
 '''col_3''': pa.float64(),
} )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
lowercase_ : int = pq.ParquetWriter(__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=__SCREAMING_SNAKE_CASE )
writer.write_table(__SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowercase_ : Optional[int] = {'''data''': DATA}
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowercase_ : Union[str, Any] = {'''data''': DATA_DICT_OF_LISTS}
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
import gzip
lowercase_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as orig_file:
with gzip.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as zipped_file:
zipped_file.writelines(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
import gzip
lowercase_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as orig_file:
with gzip.open(__SCREAMING_SNAKE_CASE , '''wb''' ) as zipped_file:
zipped_file.writelines(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''nested''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''nested''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Dict = ['''0''', '''1''', '''2''', '''3''']
lowercase_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : List[str] = ['''0''', '''1''', '''2''', '''3''']
lowercase_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : Dict = ['''0''', '''1''', '''2''', '''3''']
lowercase_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(__SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
lowercase_ : Any = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
lowercase_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
"""simple docstring"""
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
"""simple docstring"""
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ) )
f.write(__SCREAMING_SNAKE_CASE , arcname=os.path.basename(__SCREAMING_SNAKE_CASE ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Union[str, Any] = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
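An illustrative way to exercise the directory fixture above; the expectation that hidden files and directories are skipped reflects the documented loader behavior, and the helper below is a sketch, not part of this row:
def check_data_dir(data_dir ):  # hypothetical helper
 ds = datasets.load_dataset("text" , data_dir=str(data_dir ) )
 # the hidden '.test.txt' file and '.subdir' directory are expected to be ignored
 assert ds["train"].num_rows == 10
 assert ds["test"].num_rows == 10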
| 93 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """Returns the KOQV parameters of (self-)attention; does not transpose."""
    k = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP parameters of a layer; does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    """Returns the layer norm parameter of a layer."""
    return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch( variables , * , num_layers , is_encoder_only ):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k , o , q , v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[F"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[F"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi , wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[F"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[F"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[F"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[F"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[F"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[F"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi , wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[F"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[F"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict( converted_params , is_encoder_only ):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only ):
    """Replaces the model's parameters with the converted T5X parameters."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config )
    else:
        model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
 convert_tax_checkpoint_to_pytorch(
 args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
 )
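For reference, a hypothetical invocation of this conversion script; every path below is a placeholder:
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /tmp/t5x/checkpoint_1000000 \
#     --config_file /tmp/t5_config.json \
#     --pytorch_dump_path /tmp/t5_pytorch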
| 261 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__(self , embedding_dim : int = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to(self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale(self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale(self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
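A quick round-trip sketch for the normalizer above, assuming the repaired class definition; shapes and tolerance are illustrative:
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768 )
    embeds = torch.randn(2 , 768 )
    # unscale inverts scale, so the round trip should reproduce the input
    restored = normalizer.unscale(normalizer.scale(embeds ) )
    assert torch.allclose(embeds , restored , atol=1E-6 )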
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
        __a = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({"image_embeds": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        output = pipe(
            init_image , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 261 | 0 |
'''simple docstring'''
from itertools import count
def solution( min_block_length = 50 ):
    """simple docstring"""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n
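A small exponential cross-check of the same block-counting recurrence (an illustrative companion, not part of the original row):
def brute_force( total_length , min_block_length ):
    """
    Recursively count fillings of a row with blocks of length >= min_block_length,
    any two blocks separated by at least one empty unit (cf. Project Euler 114/115).
    >>> brute_force(7, 3)  # Project Euler 114 states F(7) = 17
    17
    """
    def ways(remaining ):
        if remaining == -1:
            return 1  # a block ended flush with the right boundary
        count_ = 1  # leave all remaining units empty
        for start in range(remaining - min_block_length + 1 ):
            for length in range(min_block_length , remaining - start + 1 ):
                count_ += ways(remaining - start - length - 1 )
        return count_
    return ways(total_length )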
if __name__ == "__main__":
print(F"{solution() = }")
| 200 | """simple docstring"""
import random
def partition( a , left_index , right_index ):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j] , a[i] = a[i], a[j]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random( a , left , right ):
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main( ):
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    a = [int(item ) for item in user_input.split("," )]
    quick_sort_random(a , 0 , len(a ) )
    print(a )
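A quick property check for the randomized quicksort above, comparing the in-place result against Python's built-in sort (an illustrative helper, not part of the original row):
def _self_test():
    for _ in range(100 ):
        data = [random.randint(-50 , 50 ) for _ in range(random.randint(0 , 20 ) )]
        expected = sorted(data )
        quick_sort_random(data , 0 , len(data ) )
        assert data == expected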
if __name__ == "__main__":
main()
| 261 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 5_12,
}
class JukeboxTokenizer( PreTrainedTokenizer ):
 '''simple docstring'''
 vocab_files_names = VOCAB_FILES_NAMES
 pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
 max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
 model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any]=["v3", "v2", "v2"] , lowerCAmelCase__ : Tuple=5_1_2 , lowerCAmelCase__ : Tuple=5 , lowerCAmelCase__ : Tuple="<|endoftext|>" , **lowerCAmelCase__ : Union[str, Any] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
super().__init__(
unk_token=lowerCAmelCase__ , n_genres=lowerCAmelCase__ , version=lowerCAmelCase__ , max_n_lyric_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = version
__SCREAMING_SNAKE_CASE : Dict = max_n_lyric_tokens
__SCREAMING_SNAKE_CASE : Tuple = n_genres
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : List[Any] = json.load(lowerCAmelCase__ )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : List[str] = json.load(lowerCAmelCase__ )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : Optional[int] = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
__SCREAMING_SNAKE_CASE : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
__SCREAMING_SNAKE_CASE : Dict = regex.compile(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.artists_encoder.items()}
__SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in self.genres_encoder.items()}
__SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = [self.artists_encoder.get(lowerCAmelCase__ , 0 ) for artist in list_artists]
for genres in range(len(lowerCAmelCase__ ) ):
__SCREAMING_SNAKE_CASE : Tuple = [self.genres_encoder.get(lowerCAmelCase__ , 0 ) for genre in list_genres[genres]]
__SCREAMING_SNAKE_CASE : List[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__SCREAMING_SNAKE_CASE : Optional[Any] = [[self.lyrics_encoder.get(lowerCAmelCase__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return list(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self.prepare_for_tokenization(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self._tokenize(lowerCAmelCase__ )
return artist, genre, lyrics
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int = False ):
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__SCREAMING_SNAKE_CASE : int = artists[idx].lower()
__SCREAMING_SNAKE_CASE : Optional[int] = [genres[idx].lower()]
else:
__SCREAMING_SNAKE_CASE : Any = self._normalize(artists[idx] ) + """.v2"""
__SCREAMING_SNAKE_CASE : Any = [
self._normalize(lowerCAmelCase__ ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__SCREAMING_SNAKE_CASE : Any = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
__SCREAMING_SNAKE_CASE : Optional[int] = {vocab[index]: index + 1 for index in range(len(lowerCAmelCase__ ) )}
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ ) + 1
__SCREAMING_SNAKE_CASE : Tuple = self.vocab
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.vocab.items()}
__SCREAMING_SNAKE_CASE : int = """"""
else:
__SCREAMING_SNAKE_CASE : Any = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
__SCREAMING_SNAKE_CASE : Any = self._run_strip_accents(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = lyrics.replace("""\\""" , """\n""" )
__SCREAMING_SNAKE_CASE : List[Any] = self.out_of_vocab.sub("""""" , lowerCAmelCase__ ), [], []
return artists, genres, lyrics
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = unicodedata.normalize("""NFD""" , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for char in text:
__SCREAMING_SNAKE_CASE : Optional[Any] = unicodedata.category(lowerCAmelCase__ )
if cat == "Mn":
continue
output.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = (
 [chr(i ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
 + [chr(i ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
 + [chr(i ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
__SCREAMING_SNAKE_CASE : Tuple = frozenset(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r"""_+""" )
__SCREAMING_SNAKE_CASE : Dict = """""".join([c if c in accepted else """_""" for c in text.lower()] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.sub("""_""" , lowerCAmelCase__ ).strip("""_""" )
return text
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : Any ):
"""simple docstring"""
return " ".join(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : Optional[Any] = False ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = TensorType(lowerCAmelCase__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant
__SCREAMING_SNAKE_CASE : Dict = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor
__SCREAMING_SNAKE_CASE : List[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
__SCREAMING_SNAKE_CASE : List[str] = jnp.array
__SCREAMING_SNAKE_CASE : Optional[int] = _is_jax
else:
__SCREAMING_SNAKE_CASE : List[Any] = np.asarray
__SCREAMING_SNAKE_CASE : List[Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__SCREAMING_SNAKE_CASE : int = [inputs]
if not is_tensor(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : str = as_tensor(lowerCAmelCase__ )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str]="" , lowerCAmelCase__ : int="pt" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = [0, 0, 0]
__SCREAMING_SNAKE_CASE : Tuple = [artist] * len(self.version )
__SCREAMING_SNAKE_CASE : str = [genres] * len(self.version )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.tokenize(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self._convert_token_to_id(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = [-INFINITY] * len(full_tokens[-1] )
__SCREAMING_SNAKE_CASE : List[str] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowerCAmelCase__ )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowerCAmelCase__ ) )
return (artists_file, genres_file, lyrics_file)
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.artists_decoder.get(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = [self.genres_decoder.get(lowerCAmelCase__ ) for genre in genres_index]
__SCREAMING_SNAKE_CASE : int = [self.lyrics_decoder.get(lowerCAmelCase__ ) for character in lyric_index]
return artist, genres, lyrics | 112 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
    return getitem, k
def _set( k , v ):
    return setitem, k, v
def _del( k ):
    return delitem, k
def _run_operation( obj , fun , *args ):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api( ):
    def is_public(name ) -> bool:
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
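Note that `>` on sets is the proper-superset test, so the final assertion requires the built-in dict to expose strictly more public names than HashMap; a minimal illustration:
def test_superset_semantics():  # illustrative companion test
    assert {"get", "items", "keys"} > {"get", "items"}
    assert not ({"get"} > {"get"})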
| 261 | 0 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 132 | """simple docstring"""
import copy
import re
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
    @staticmethod
    def shortname_for_key( info , param_name ):
        words = param_name.split("_" )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name( info , param_name ):
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info( cls ):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
    @classmethod
    def shortname( cls , params ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = "" if isinstance(v , (int, float) ) else "-"
            e = F"{key}{sep}{v}"
            name.append(e )
        return "_".join(name )
    @classmethod
    def parse_repr( cls , repr ):
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_" )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k , p_v = value.split("-" )
            else:
                p_k = re.sub("[0-9.]" , "" , value )
                p_v = float(re.sub("[^0-9.]" , "" , value ) )
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
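# A minimal usage sketch of the namer above (hypothetical subclass and values,
# not part of the original module): non-default hyperparameters are abbreviated
# into the run name, and `parse_repr` inverts the encoding.
class RunNamer(TrialShortNamer ):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-4, "num_epochs": 3}

# RunNamer.shortname({"learning_rate": 1e-3, "num_epochs": 3}) -> "run_lr0.001"
# (the default num_epochs is omitted from the name), and
# RunNamer.parse_repr("run_lr0.001") restores {"learning_rate": 0.001, "num_epochs": 3}.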
| 261 | 0 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data : float
    left : TreeNode | None = None
    right : TreeNode | None = None
def is_binary_search_tree ( node ):
    '''simple docstring'''
    def is_valid_tree(node ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(node ):
        raise ValueError(
            """Each node should be type of TreeNode and data should be float.""" )
    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(node , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
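    # A quick usage sketch of the validator above (example values, not original):
    root = TreeNode(3.0 , left=TreeNode(2.0 ) , right=TreeNode(4.0 ) )
    assert is_binary_search_tree(root )  # 2 < 3 < 4 satisfies the bound checks
    assert not is_binary_search_tree(TreeNode(3.0 , left=TreeNode(4.0 ) ) )  # 4 is not < 3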
| 165 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig( PretrainedConfig ):
    model_type = """upernet"""
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 261 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase = BASE_URL + """/user"""
# https://github.com/settings/tokens
__lowerCAmelCase = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info( auth_token ) -> dict[Any, Any]:
    headers = {
        'Authorization': f'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
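# A hedged offline test sketch for `fetch_github_info` (a hypothetical helper;
# it patches requests.get with unittest.mock so no network call is made):
def _smoke_test_fetch_github_info() -> None:
    from unittest import mock
    with mock.patch('''requests.get''') as fake_get:
        fake_get.return_value.json.return_value = {'''login''': '''octocat'''}
        assert fetch_github_info('''dummy-token''')['''login'''] == '''octocat'''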
| 196 | """simple docstring"""
def solution( n = 1_0_0_0 ):
    a = 3
    result = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 1_5 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
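# Note: the `elif a % 1_5 == 0` branch in `solution` is unreachable (any multiple
# of 15 already satisfies `a % 3 == 0`), so the function is equivalent to the
# classic formulation below (an illustrative helper, not in the original):
def solution_oneliner(n: int = 1_0_0_0) -> int:
    return sum(a for a in range(3 , n ) if a % 3 == 0 or a % 5 == 0)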
| 261 | 0 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : str , snake_case__ : str="attention" ):
A = A = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
A = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
A = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
A = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
A = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
A = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
A = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
A = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Dict=False ):
if split_mlp_wi:
A = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
A = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
A = (wi_a, wi_a)
else:
A = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
A = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def _snake_case ( snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] ):
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def _snake_case ( snake_case__ : List[Any] , *, snake_case__ : int , snake_case__ : List[str] , snake_case__ : Dict = False ):
A = traverse_util.flatten_dict(variables['target'] )
A = {'/'.join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
A = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , snake_case__ )
A = collections.OrderedDict()
# Shared embeddings.
A = old['token_embedder/embedding']
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A = tax_layer_norm_lookup(snake_case__ , snake_case__ , 'encoder' , 'pre_attention_layer_norm' )
A , A , A , A = tax_attention_lookup(snake_case__ , snake_case__ , 'encoder' , 'attention' )
A = layer_norm
A = k.T
A = o.T
A = q.T
A = v.T
# Block i, layer 1 (MLP).
A = tax_layer_norm_lookup(snake_case__ , snake_case__ , 'encoder' , 'pre_mlp_layer_norm' )
A , A = tax_mlp_lookup(snake_case__ , snake_case__ , 'encoder' , snake_case__ )
A = layer_norm
if split_mlp_wi:
A = wi[0].T
A = wi[1].T
else:
A = wi.T
A = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , 'encoder' ).T
A = old['encoder/encoder_norm/scale']
if not scalable_attention:
A = tax_relpos_bias_lookup(
snake_case__ , 0 , 'encoder' ).T
A = tax_relpos_bias_lookup(
snake_case__ , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A = tax_layer_norm_lookup(snake_case__ , snake_case__ , 'decoder' , 'pre_self_attention_layer_norm' )
A , A , A , A = tax_attention_lookup(snake_case__ , snake_case__ , 'decoder' , 'self_attention' )
A = layer_norm
A = k.T
A = o.T
A = q.T
A = v.T
# Block i, layer 1 (Cross Attention).
A = tax_layer_norm_lookup(snake_case__ , snake_case__ , 'decoder' , 'pre_cross_attention_layer_norm' )
A , A , A , A = tax_attention_lookup(snake_case__ , snake_case__ , 'decoder' , 'encoder_decoder_attention' )
A = layer_norm
A = k.T
A = o.T
A = q.T
A = v.T
# Block i, layer 2 (MLP).
A = tax_layer_norm_lookup(snake_case__ , snake_case__ , 'decoder' , 'pre_mlp_layer_norm' )
A , A = tax_mlp_lookup(snake_case__ , snake_case__ , 'decoder' , snake_case__ )
A = layer_norm
if split_mlp_wi:
A = wi[0].T
A = wi[1].T
else:
A = wi.T
A = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A = tax_relpos_bias_lookup(snake_case__ , snake_case__ , 'decoder' ).T
A = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A = old['decoder/logits_dense/kernel'].T
return new
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
A = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
A = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
A = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
A = state_dict['shared.weight']
return state_dict
def _snake_case ( snake_case__ : str , snake_case__ : Any , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Any ):
A = checkpoints.load_tax_checkpoint(snake_case__ )
A = convert_tax_to_pytorch(
snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ )
A = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def _snake_case ( snake_case__ : List[str] , snake_case__ : str , snake_case__ : Any , snake_case__ : Union[str, Any] = False , snake_case__ : Optional[Any] = False , ):
A = MTaConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
A = UMTaEncoderModel(snake_case__ )
else:
A = UMTaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print('Done' )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 74 | """simple docstring"""
import operator
def _lowerCamelCase( a , a = False , a = None ):
__a = operator.lt if reverse else operator.gt
__a = solution or []
if not arr:
return solution
__a = [arr.pop(0 )]
for i, item in enumerate(a ):
if _operator(a , sublist[-1] ):
sublist.append(a )
arr.pop(a )
# merging sublist into solution list
if not solution:
solution.extend(a )
else:
while sublist:
__a = sublist.pop(0 )
for i, xx in enumerate(a ):
if not _operator(a , a ):
solution.insert(a , a )
break
else:
solution.append(a )
strand_sort(a , a , a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
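    # A couple of extra edge cases that also hold for the implementation above:
    assert strand_sort([]) == []
    assert strand_sort([1, 1, 2, 1]) == [1, 1, 1, 2]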
| 261 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return self.get_dummy_input()
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''')
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=False , ):
"""simple docstring"""
lowercase_ = 4
lowercase_ = 3_2
lowercase_ = (3_2, 3_2)
lowercase_ = torch.manual_seed(0)
lowercase_ = torch.device(lowerCAmelCase_)
lowercase_ = (batch_size, num_channels) + sizes
lowercase_ = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = {"""hidden_states""": hidden_states}
if include_temb:
lowercase_ = 1_2_8
lowercase_ = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
if include_res_hidden_states_tuple:
lowercase_ = torch.manual_seed(1)
lowercase_ = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
if include_encoder_hidden_states:
lowercase_ = floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
if include_skip_sample:
lowercase_ = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
return dummy_input
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = {
"""in_channels""": 3_2,
"""out_channels""": 3_2,
"""temb_channels""": 1_2_8,
}
if self.block_type == "up":
lowercase_ = 3_2
if self.block_type == "mid":
init_dict.pop("""out_channels""")
lowercase_ = self.dummy_input
return init_dict, inputs_dict
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_init_args_and_inputs_for_common()
lowercase_ = self.block_class(**lowerCAmelCase_)
unet_block.to(lowerCAmelCase_)
unet_block.eval()
with torch.no_grad():
lowercase_ = unet_block(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = output[0]
self.assertEqual(output.shape , self.output_shape)
lowercase_ = output[0, -1, -3:, -3:]
lowercase_ = torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3)
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""")
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ , lowercase_ = self.prepare_init_args_and_inputs_for_common()
lowercase_ = self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
lowercase_ = model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = output[0]
lowercase_ = torch.device(lowerCAmelCase_)
lowercase_ = randn_tensor(output.shape , device=lowerCAmelCase_)
lowercase_ = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
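# For context: concrete tests in diffusers subclass the mixin above, roughly like
# this sketch (class and attribute values are assumptions, not from this file):
#
#     class UpBlock2DTests(<mixin above>, unittest.TestCase):
#         block_class = UpBlock2D
#         block_type = "up"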
| 136 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs , ):
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ):
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ):
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 261 | 0 |
'''simple docstring'''
import random
def _partition ( data , pivot ):
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select ( items , index ):
    """simple docstring"""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    less, equal, greater = _partition(items , pivot )
    count = len(equal )
    m = len(less )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less , index )
    # must be in larger
    else:
        return quick_select(greater , index - (m + count) )
| 331 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
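# For context: the `deprecate(...)` call above only emits a warning at import
# time; conceptually it is close to this simplified sketch (an assumption, not
# the diffusers implementation):
#
#     import warnings
#     warnings.warn(
#         "Importing StableDiffusionControlNetPipeline from this module is "
#         "deprecated; use `from diffusers import StableDiffusionControlNetPipeline`.",
#         FutureWarning,
#         stacklevel=3,
#     )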
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase__ :
    def __init__( self , row , column , default_value = 0 ) -> None:
        '''simple docstring'''
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ) -> str:
        '''simple docstring'''
        s = F'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = F'%{max_element_length}s'
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
return s
def __repr__( self :Union[str, Any] ) -> int:
'''simple docstring'''
return str(self )
    def validate_indicies( self , loc ) -> bool:
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc ) -> float:
        '''simple docstring'''
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc , value ) -> None:
        '''simple docstring'''
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another ) -> "Matrix":
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ) -> "Matrix":
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another ) -> "Matrix":
        '''simple docstring'''
        return self + (-another)
    def __mul__( self , another ) -> "Matrix":
        '''simple docstring'''
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F'Unsupported type given for another ({type(another )})'
            raise TypeError(msg )
    def transpose( self ) -> "Matrix":
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u , v ):
        '''simple docstring'''
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
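    # For reference, `sherman_morrison` above implements the rank-one inverse
    # update (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # with `self` playing the role of A^(-1); the zero check rejects the singular
    # case 1 + v^T A^(-1) u = 0.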
# Testing
if __name__ == "__main__":
    def test1( ) -> None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}' )
        print(f'v is {v}' )
        print(f'uv^T is {u * v.transpose()}' )
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' )
    def test2( ) -> None:
        """simple docstring"""
        import doctest
        doctest.testmod()
    test1()
    test2()
| 161 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__:Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
__a = hf_model.adapter
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(a , a , a , a )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a , a , a , a ):
__a = full_name.split("adaptor." )[-1]
__a = name.split("." )
if items[1].isdigit():
__a = int(items[1] )
else:
__a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
__a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a , a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a ):
__a , __a = emb.weight.shape
__a = nn.Linear(a , a , bias=a )
__a = emb.weight.data
return lin_layer
@torch.no_grad()
def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ):
__a = WavaVecaConfig.from_pretrained(
a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , )
__a = MBartConfig.from_pretrained(a )
# load model
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__a = model[0].eval()
# load feature extractor
__a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a )
# set weights for wav2vec2 encoder
__a = WavaVecaModel(a )
recursively_load_weights_wavaveca(model.encoder , a )
# load decoder weights
__a = MBartForCausalLM(a )
__a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
__a = SpeechEncoderDecoderModel(encoder=a , decoder=a )
__a = False
__a = MBartaaTokenizer(a )
tokenizer.save_pretrained(a )
__a = hf_wavavec.config.to_dict()
__a = tokenizer.pad_token_id
__a = tokenizer.bos_token_id
__a = tokenizer.eos_token_id
__a = "mbart50"
__a = "wav2vec2"
__a = tokenizer.eos_token_id
__a = 2_5_0_0_0_4
__a = tokenizer.eos_token_id
__a = SpeechEncoderDecoderConfig.from_dict(a )
hf_wavavec.save_pretrained(a )
feature_extractor.save_pretrained(a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
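# Example invocation of this conversion script (the script name and all file
# paths below are hypothetical):
#   python convert_wav2vec2_seq2seq.py --checkpoint_path ./checkpoint.pt \
#       --dict_path ./dict.txt --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./converted-model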
| 261 | 0 |
'''simple docstring'''
import os
def solution ( filename : str = "matrix.txt" ) -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    dp = [[0 for cell in row] for row in grid]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
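# A file-free variant for quick experimentation (an illustrative helper using the
# same right/down DP recurrence as `solution` above; assumes a square grid):
def min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid[0] )
    dp = [[0] * n for _ in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
# e.g. min_path_sum([[1, 3], [2, 4]]) == 7  (path 1 -> 2 -> 4)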
| 93 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
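# A minimal sketch of the lazy-import mechanism that `_LazyModule` (used above)
# provides (a simplified assumption, not the actual transformers implementation):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_submodule[attr]
        except KeyError:
            raise AttributeError(attr) from None
        # Import the owning submodule only on first access, then cache the value.
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)
        return value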
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort ( collection , n ) -> None:
    '''simple docstring'''
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next ( collection , index ) -> None:
    '''simple docstring'''
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
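    # Quick self-check (the sort is in-place, so inspect the list afterwards):
    sample = [5, 3, 1, 4, 2]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]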
| 1 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log( folder_path ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def _lowerCamelCase( a ):
if params.n_gpu <= 0:
__a = 0
__a = -1
__a = True
__a = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
__a = int(os.environ["WORLD_SIZE"] )
__a = int(os.environ["N_GPU_NODE"] )
__a = int(os.environ["RANK"] )
# number of nodes / node ID
__a = params.world_size // params.n_gpu_per_node
__a = params.global_rank // params.n_gpu_per_node
__a = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
__a = 1
__a = 0
__a = 0
__a = 0
__a = 1
__a = 1
__a = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__a = params.node_id == 0 and params.local_rank == 0
__a = params.n_nodes > 1
# summary
__a = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def _lowerCamelCase( a ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 261 | 0 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
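    # Pointwise sanity checks: fuzzy union/intersection over the same universe
    # reduce to the elementwise max/min of the membership grades.
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))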
| 200 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 261 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = AltDiffusionPipeline
_A : int = TEXT_TO_IMAGE_PARAMS
_A : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
_A : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
__SCREAMING_SNAKE_CASE : Dict = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
__SCREAMING_SNAKE_CASE : Any = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__SCREAMING_SNAKE_CASE : Any = 7_7
__SCREAMING_SNAKE_CASE : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple=0 ):
"""simple docstring"""
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE : List[str] = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = text_encoder
__SCREAMING_SNAKE_CASE : List[Any] = AltDiffusionPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = """A photo of an astronaut"""
__SCREAMING_SNAKE_CASE : int = alt_pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
__SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 112 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
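    # Sherman-Morrison: if this matrix already holds A^(-1), then
    # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # which the next method computes without re-inverting the full matrix.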
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 261 | 0 |
"""simple docstring"""
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
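# Reference values: partition(2) == 2, partition(3) == 3, partition(5) == 7 —
# the number of ways each integer can be written as a sum of positive integers.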
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
a :Union[str, Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
a :int = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 132 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
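# Illustrative example: for input_ids [[5, 6, pad], [7, pad, pad]], trim_batch
# drops the all-pad third column and returns the matching 2-column tensors
# (plus the correspondingly trimmed attention mask, when one is given).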
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
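# Token-level F1: precision and recall are computed over the multiset overlap
# of normalized answer tokens, and F1 = 2 * precision * recall / (precision + recall).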
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 261 | 0 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 165 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: tuple = ("DownEncoderBlock2D",),
        up_block_types: tuple = ("UpDecoderBlock2D",),
        block_out_channels: tuple = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
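    # blend_v/blend_h cross-fade neighboring tiles with a linear ramp over the
    # overlap region, which is how the tiled encode/decode below avoid visible
    # seams between independently processed tiles.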
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
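# Minimal usage sketch (illustrative, with the default config above):
#     vae = AutoencoderKL()
#     image = torch.randn(1, 3, 32, 32)
#     posterior = vae.encode(image).latent_dist
#     reconstruction = vae.decode(posterior.sample()).sample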
| 261 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 196 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
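# Illustrative call (names as defined above): for a subclass whose
# model_input_names is ["input_values"],
#     extractor.pad([{"input_values": [1.0, 2.0]}, {"input_values": [3.0]}],
#                   padding=True, return_tensors="np")
# right-pads the shorter sequence with `padding_value` and returns a matching
# "attention_mask" alongside the padded batch.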
| 261 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co") | 74 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
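# A string can be rearranged into a palindrome iff at most one distinct
# character occurs an odd number of times; both checks above exploit exactly
# this property, the first via Counter and the second with a manual tally.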
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 0 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
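# Worked example (the classic healthy/fever HMM): with
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#     emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# viterbi(observations, states, start_p, trans_p, emit_p) returns the most
# likely hidden state sequence ["Healthy", "Healthy", "Fever"].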
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 136 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
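        # The integration check above pins the extractor's contract: a raw
        # waveform becomes a (1, 1024, 128) log-mel spectrogram (1024 frames x
        # 128 mel bins) whose first 30 values match the reference slice to 1e-4.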
| 261 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
with open(lowerCAmelCase ) as metadata_file:
__magic_name__ : Optional[Any] = json.load(lowerCAmelCase )
__magic_name__ : Any = LukeConfig(use_entity_aware_attention=lowerCAmelCase , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__magic_name__ : List[Any] = torch.load(lowerCAmelCase , map_location='cpu' )['module']
# Load the entity vocab file
__magic_name__ : Tuple = load_original_entity_vocab(lowerCAmelCase )
# add an entry for [MASK2]
__magic_name__ : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__magic_name__ : str = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__magic_name__ : Any = AddedToken('<ent>' , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
__magic_name__ : List[str] = AddedToken('<ent2>' , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) , 'r' ) as f:
__magic_name__ : Dict = json.load(lowerCAmelCase )
__magic_name__ : Dict = 'MLukeTokenizer'
with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase )
# Initialize the embeddings of the special tokens
__magic_name__ : str = tokenizer.convert_tokens_to_ids(['@'] )[0]
__magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(['#'] )[0]
__magic_name__ : str = state_dict['embeddings.word_embeddings.weight']
__magic_name__ : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
__magic_name__ : int = word_emb[enta_init_index].unsqueeze(0 )
__magic_name__ : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__magic_name__ : int = state_dict[bias_name]
__magic_name__ : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__magic_name__ : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
__magic_name__ : Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__magic_name__ : List[str] = f'encoder.layer.{layer_index}.attention.self.'
__magic_name__ : Optional[int] = state_dict[prefix + matrix_name]
__magic_name__ : Union[str, Any] = state_dict[prefix + matrix_name]
__magic_name__ : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__magic_name__ : int = state_dict['entity_embeddings.entity_embeddings.weight']
__magic_name__ : List[Any] = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__magic_name__ : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__magic_name__ : int = state_dict['entity_predictions.bias']
__magic_name__ : int = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__magic_name__ : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__magic_name__ : Union[str, Any] = LukeForMaskedLM(config=lowerCAmelCase ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
__magic_name__ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
__magic_name__ : Tuple = state_dict[key]
else:
__magic_name__ : int = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
if set(lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowerCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__magic_name__ : Union[str, Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase , task='entity_classification' )
__magic_name__ : Optional[int] = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__magic_name__ : Dict = (0, 9)
__magic_name__ : Union[str, Any] = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors='pt' )
__magic_name__ : Optional[int] = model(**lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ : Optional[int] = torch.Size((1, 33, 768) )
__magic_name__ : Dict = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__magic_name__ : Tuple = torch.Size((1, 1, 768) )
__magic_name__ : str = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__magic_name__ : Dict = MLukeTokenizer.from_pretrained(lowerCAmelCase )
__magic_name__ : List[str] = 'Tokyo is the capital of <mask>.'
__magic_name__ : Union[str, Any] = (24, 30)
__magic_name__ : Optional[int] = tokenizer(lowerCAmelCase , entity_spans=[span] , return_tensors='pt' )
__magic_name__ : Dict = model(**lowerCAmelCase )
__magic_name__ : List[Any] = encoding['input_ids'][0].tolist()
__magic_name__ : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
__magic_name__ : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase )
__magic_name__ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__magic_name__ : List[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCAmelCase ) )
model.save_pretrained(lowerCAmelCase )
def load_original_entity_vocab(entity_vocab_path):
__magic_name__ : List[str] = ['[MASK]', '[PAD]', '[UNK]']
__magic_name__ : Any = [json.loads(lowerCAmelCase ) for line in open(lowerCAmelCase )]
__magic_name__ : str = {}
for entry in data:
__magic_name__ : int = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__magic_name__ : str = entity_id
break
__magic_name__ : Union[str, Any] = f'{language}:{entity_name}'
__magic_name__ : Optional[int] = entity_id
return new_mapping
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
lowerCAmelCase :List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 331 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
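# scale() standardizes image embeddings with the learned per-dimension
# mean/std and unscale() inverts the transform; the pair is meant to bracket
# an operation performed in normalized embedding space (for example, noising).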
| 261 | 0 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
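# Example: hexagonal_numbers(5) -> [0, 1, 6, 15, 28]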
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 161 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
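    # `_LazyModule` registers every name declared in `_import_structure` on the
    # module object but defers the heavy torch/flax imports until an attribute
    # is first accessed.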
| 261 | 0 |
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( snake_case_ ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = Rectangle(height=0.5 , width=0.5 )
lowercase_ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase_ : Dict = Rectangle(height=0.25 , width=0.25 )
lowercase_ : int = [mem.copy() for i in range(6 )]
lowercase_ : List[str] = [mem.copy() for i in range(6 )]
lowercase_ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : Dict = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : Any = Text('''CPU''' , font_size=24 )
lowercase_ : Optional[Any] = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = [mem.copy() for i in range(4 )]
lowercase_ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : Optional[int] = Text('''GPU''' , font_size=24 )
lowercase_ : int = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = [mem.copy() for i in range(6 )]
lowercase_ : List[str] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : str = Text('''Model''' , font_size=24 )
lowercase_ : Dict = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = []
lowercase_ : int = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Any = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
target.move_to(__SCREAMING_SNAKE_CASE )
model_arr.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = [meta_mem.copy() for i in range(6 )]
lowercase_ : int = [meta_mem.copy() for i in range(6 )]
lowercase_ : List[str] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : Optional[int] = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
lowercase_ : int = Text('''Disk''' , font_size=24 )
lowercase_ : Dict = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
disk.move_to([-4, -1.25, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase_ : Union[str, Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__SCREAMING_SNAKE_CASE )
lowercase_ : str = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE ) )
lowercase_ : int = Square(0.3 )
input.set_fill(__SCREAMING_SNAKE_CASE , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __SCREAMING_SNAKE_CASE , buff=0.5 )
self.play(Write(__SCREAMING_SNAKE_CASE ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.02 )
self.play(MoveToTarget(__SCREAMING_SNAKE_CASE ) )
self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Tuple = Arrow(start=__SCREAMING_SNAKE_CASE , end=__SCREAMING_SNAKE_CASE , color=__SCREAMING_SNAKE_CASE , buff=0.5 )
a.next_to(model_arr[0].get_left() , __SCREAMING_SNAKE_CASE , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase_ : Tuple = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
lowercase_ : Dict = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_cpu_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase_ : List[Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __SCREAMING_SNAKE_CASE , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowercase_ : List[str] = AnimationGroup(
FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , FadeIn(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__SCREAMING_SNAKE_CASE )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase_ : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase_ : List[str] = a_c
lowercase_ : Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__SCREAMING_SNAKE_CASE ) , FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , )
lowercase_ : Optional[int] = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , MoveToTarget(__SCREAMING_SNAKE_CASE ) )
self.wait()
| 93 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
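# Look up the attention projection kernels (key, out, query, value) for block i
# in a flattened T5X parameter dict.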
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
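# Look up the MLP kernels for block i; v1.1 checkpoints use a gated GeLU with
# two input projections (wi_0/wi_1) instead of a single wi.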
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
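# Look up the layer-norm scale for the given layer of block i.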
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
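# Convert the flattened T5X parameter dict into PyTorch layout, transposing
# each kernel so Linear weights have shape (out_features, in_features).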
def _lowerCamelCase( a , *, a , a ):
__a = traverse_util.flatten_dict(variables["target"] )
__a = {"/".join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , a )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old["token_embedder/embedding"]
# Encoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
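# Turn the converted numpy arrays into torch tensors and fill in the weights
# that are tied to the shared token embedding.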
def _lowerCamelCase( a , a ):
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__a = state_dict["shared.weight"]
return state_dict
def _lowerCamelCase( a , a , a , a ):
__a = checkpoints.load_tax_checkpoint(a )
__a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__a = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def _lowerCamelCase( a , a , a , a = False ):
__a = TaConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(a )
else:
__a = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 261 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
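# Processor that pairs the OwlViT image processor with a CLIP tokenizer; text
# queries in a batch are padded so every sample has the same number of queries.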
class __A ( snake_case_ ):
a__ : Any = ["""image_processor""", """tokenizer"""]
a__ : List[Any] = """OwlViTImageProcessor"""
a__ : int = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__(self : Optional[Any] , __a : int=None , __a : str=None , **__a : Optional[int] ):
UpperCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
UpperCAmelCase_ = kwargs.pop("feature_extractor" )
UpperCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__(self : Optional[int] , __a : List[str]=None , __a : int=None , __a : Tuple=None , __a : str="max_length" , __a : Tuple="np" , **__a : str ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
UpperCAmelCase_ = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
UpperCAmelCase_ = []
# Maximum number of queries across batch
UpperCAmelCase_ = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
UpperCAmelCase_ = t + [" "] * (max_num_queries - len(__a ))
UpperCAmelCase_ = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
UpperCAmelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
UpperCAmelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
UpperCAmelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
UpperCAmelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
UpperCAmelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
UpperCAmelCase_ = BatchEncoding()
UpperCAmelCase_ = input_ids
UpperCAmelCase_ = attention_mask
if query_images is not None:
UpperCAmelCase_ = BatchEncoding()
UpperCAmelCase_ = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
UpperCAmelCase_ = query_pixel_values
if images is not None:
UpperCAmelCase_ = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
UpperCAmelCase_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def _lowercase (self : List[str] , *__a : str , **__a : Optional[int] ):
return self.image_processor.post_process(*__a , **__a )
def _lowercase (self : Dict , *__a : List[str] , **__a : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*__a , **__a )
def _lowercase (self : Optional[int] , *__a : Any , **__a : List[str] ):
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def _lowercase (self : Tuple , *__a : Optional[int] , **__a : Optional[int] ):
return self.tokenizer.batch_decode(*__a , **__a )
def _lowercase (self : str , *__a : Dict , **__a : Tuple ):
return self.tokenizer.decode(*__a , **__a )
@property
def _lowercase (self : Optional[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def _lowercase (self : Tuple ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = StableUnCLIPImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : List[Any] = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 261 | 0 |
'''simple docstring'''
import numpy as np
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector ))
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """Sigmoid linear unit (quick-GELU form): x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200 | """simple docstring"""
import random
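# Lomuto-style partition: after one pass, every element smaller than the pivot
# (taken from left_index) sits to the left of the returned index.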
def _lowerCamelCase( a , a , a ):
__a = a[left_index]
__a = left_index + 1
for j in range(left_index + 1 , a ):
if a[j] < pivot:
__a , __a = a[i], a[j]
i += 1
__a , __a = a[i - 1], a[left_index]
return i - 1
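# Randomized quicksort: pick a random pivot, swap it to the left bound,
# partition, then recurse on both sides of the pivot.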
def _lowerCamelCase( a , a , a ):
if left < right:
__a = random.randint(a , right - 1 )
__a , __a = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
__a = partition(a , a , a )
quick_sort_random(
a , a , a ) # recursive quicksort to the left of the pivot point
quick_sort_random(
a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point
def _lowerCamelCase( ):
__a = input("Enter numbers separated by a comma:\n" ).strip()
__a = [int(a ) for item in user_input.split("," )]
quick_sort_random(a , 0 , len(a ) )
print(a )
if __name__ == "__main__":
main()
| 261 | 0 |
'''simple docstring'''
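# Regular-expression matching supporting "." and "*" via bottom-up dynamic
# programming over prefixes of the input string and the pattern.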
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: Tuple ):
__SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowerCamelCase ) + 1
__SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__SCREAMING_SNAKE_CASE : str = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
# since string of zero length match pattern of zero length
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _lowerCamelCase ):
for j in range(1 , _lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__SCREAMING_SNAKE_CASE : int = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__SCREAMING_SNAKE_CASE : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__SCREAMING_SNAKE_CASE : Any = dp[i - 1][j]
else:
__SCREAMING_SNAKE_CASE : str = 0
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
UpperCamelCase__ : List[str] = """aab"""
UpperCamelCase__ : List[Any] = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"{input_string} matches the given pattern {pattern}")
else:
print(f"{input_string} does not match with the given pattern {pattern}") | 112 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
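# Helpers that bundle a mapping operation (getitem/setitem/delitem) with its
# arguments, so the same operation scripts can be replayed against both dict
# and HashMap in the parametrized test below.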
def _lowerCamelCase( a ):
return getitem, k
def _lowerCamelCase( a , a ):
return setitem, k, v
def _lowerCamelCase( a ):
return delitem, k
def _lowerCamelCase( a , a , *a ):
try:
return fun(a , *a ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE__:List[Any] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
SCREAMING_SNAKE_CASE__:Any = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
SCREAMING_SNAKE_CASE__:int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE__:Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _lowerCamelCase( a ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(a ):
__a , __a = _run_operation(a , a , *a )
__a , __a = _run_operation(a , a , *a )
assert my_res == py_res
assert str(a ) == str(a )
assert set(a ) == set(a )
assert len(a ) == len(a )
assert set(my.items() ) == set(py.items() )
def _lowerCamelCase( ):
def is_public(a ) -> bool:
return not name.startswith("_" )
__a = {name for name in dir({} ) if is_public(a )}
__a = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
| 261 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
a :Union[str, Any] = (3, 9, -11, 0, 7, 5, 1, -1)
a :List[Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :int
_SCREAMING_SNAKE_CASE :Node | None
class __a :
'''simple docstring'''
def __init__( self , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = None
        for i in sorted(_a , reverse=True ):
SCREAMING_SNAKE_CASE__ : Tuple = Node(_a , self.head )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE__ : List[Any] = node.next_node
def __len__( self ) -> Tuple:
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self ) -> Dict:
"""simple docstring"""
return " -> ".join([str(_a ) for node in self] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
return SortedLinkedList(list(__lowerCAmelCase ) + list(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
a :List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 132 | """simple docstring"""
import copy
import re
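# Builds short, collision-free run names from hyperparameter dicts by
# abbreviating each parameter word to a unique prefix.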
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = param_name.split("_" )
__a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__a = ["", "_"]
for separator in separators:
__a = separator.join(lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__a = shortname
__a = param_name
return shortname
return param_name
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase )
__a = short_name
__a = param_name
@classmethod
def a__ ( cls ):
if cls.NAMING_INFO is not None:
return
__a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase , lowerCamelCase )
__a = info
@classmethod
def a__ ( cls , lowerCamelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1 if v else 0
__a = "" if isinstance(lowerCamelCase , (int, float) ) else "-"
__a = F"{key}{sep}{v}"
name.append(lowerCamelCase )
return "_".join(lowerCamelCase )
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a = []
else:
__a = repr.split("_" )
__a = {}
for value in values:
if "-" in value:
__a , __a = value.split("-" )
else:
__a = re.sub("[0-9.]" , "" , lowerCamelCase )
__a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) )
__a = cls.NAMING_INFO["reverse_short_param"][p_k]
__a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a = cls.DEFAULTS[k]
return parameters
| 261 | 0 |
"""simple docstring"""
import copy
import re
class lowerCamelCase :
lowerCamelCase__ : Dict = """hp"""
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : int = None
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = prefix
SCREAMING_SNAKE_CASE__ = defaults
cls.build_naming_info()
@staticmethod
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple ) -> str:
if len(__UpperCAmelCase ) == 0:
return ""
SCREAMING_SNAKE_CASE__ = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__UpperCAmelCase ) + 1 ):
SCREAMING_SNAKE_CASE__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__UpperCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = """"""
while integer != 0:
SCREAMING_SNAKE_CASE__ = chr(ord("""A""" ) + integer % 1_0 ) + s
integer //= 1_0
return s
SCREAMING_SNAKE_CASE__ = 0
while True:
SCREAMING_SNAKE_CASE__ = word + """#""" + int_to_alphabetic(__UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE__ = sword
break
SCREAMING_SNAKE_CASE__ = short_word
SCREAMING_SNAKE_CASE__ = word
return short_word
@staticmethod
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = param_name.split("""_""" )
SCREAMING_SNAKE_CASE__ = [TrialShortNamer.shortname_for_word(__UpperCAmelCase , __UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
SCREAMING_SNAKE_CASE__ = ["""""", """_"""]
for separator in separators:
SCREAMING_SNAKE_CASE__ = separator.join(__UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
SCREAMING_SNAKE_CASE__ = shortname
SCREAMING_SNAKE_CASE__ = param_name
return shortname
return param_name
@staticmethod
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ = TrialShortNamer.shortname_for_key(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = short_name
SCREAMING_SNAKE_CASE__ = param_name
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Tuple:
if cls.NAMING_INFO is not None:
return
SCREAMING_SNAKE_CASE__ = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
SCREAMING_SNAKE_CASE__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = info
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , __UpperCAmelCase : Union[str, Any] ) -> Dict:
cls.build_naming_info()
assert cls.PREFIX is not None
SCREAMING_SNAKE_CASE__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
SCREAMING_SNAKE_CASE__ = cls.NAMING_INFO["""short_param"""][k]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = 1 if v else 0
SCREAMING_SNAKE_CASE__ = """""" if isinstance(__UpperCAmelCase , (int, float) ) else """-"""
SCREAMING_SNAKE_CASE__ = F"""{key}{sep}{v}"""
name.append(__UpperCAmelCase )
return "_".join(__UpperCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , __UpperCAmelCase : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
SCREAMING_SNAKE_CASE__ = []
else:
SCREAMING_SNAKE_CASE__ = repr.split("""_""" )
SCREAMING_SNAKE_CASE__ = {}
for value in values:
if "-" in value:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = value.split("""-""" )
else:
SCREAMING_SNAKE_CASE__ = re.sub("""[0-9.]""" , """""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = float(re.sub("""[^0-9.]""" , """""" , __UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = cls.NAMING_INFO["""reverse_short_param"""][p_k]
SCREAMING_SNAKE_CASE__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
SCREAMING_SNAKE_CASE__ = cls.DEFAULTS[k]
return parameters
| 165 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
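# Configuration for UperNet semantic segmentation; falls back to a default
# ResNet backbone config when none is provided.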
class snake_case__ ( snake_case_ ):
_snake_case : Optional[int] = """upernet"""
def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = backbone_config.get("model_type" )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(lowerCamelCase )
__a = backbone_config
__a = hidden_size
__a = initializer_range
__a = pool_scales
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = auxiliary_in_channels
__a = auxiliary_channels
__a = auxiliary_num_convs
__a = auxiliary_concat_input
__a = loss_ignore_index
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
| 261 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __a ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=4 , ) -> Any:
'''simple docstring'''
lowercase__: Union[str, Any] = parent
lowercase__: int = batch_size
lowercase__: Union[str, Any] = seq_length
lowercase__: Optional[Any] = is_training
lowercase__: Any = use_attention_mask
lowercase__: str = use_token_type_ids
lowercase__: List[Any] = use_labels
lowercase__: Any = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: int = num_hidden_layers
lowercase__: Dict = num_attention_heads
lowercase__: Tuple = intermediate_size
lowercase__: Tuple = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: List[str] = attention_probs_dropout_prob
lowercase__: Union[str, Any] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Union[str, Any] = type_sequence_label_size
lowercase__: List[Any] = initializer_range
lowercase__: Dict = num_choices
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Any = None
if self.use_attention_mask:
lowercase__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Tuple = None
if self.use_token_type_ids:
lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__: Optional[int] = config_and_inputs
lowercase__: str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __a ( snake_case_ , unittest.TestCase ):
__lowercase : Any = True
__lowercase : int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = FlaxRoFormerModelTester(self )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__: int = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=lowerCAmelCase__ )
lowercase__: Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class __a ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowercase__: List[str] = jnp.array([[0, 1, 2, 3, 4, 5]] )
lowercase__: Optional[Any] = model(lowerCAmelCase__ )[0]
lowercase__: Any = 50_000
lowercase__: Optional[Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , lowerCAmelCase__ )
lowercase__: Union[str, Any] = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 196 | """simple docstring"""
def _lowerCamelCase( a = 1_0_0_0 ):
__a = 3
__a = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:  # multiples of 15 are already covered by a % 3 == 0
            result += a
        a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class lowerCAmelCase_ ( snake_case_ ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
_lowerCamelCase: Optional[torch.FloatTensor] = None
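# Build a discrete beta schedule from an alpha-bar function (cosine or exp),
# matching the "squaredcos_cap_v2" schedule this scheduler requires.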
def _snake_case ( snake_case__ : Any , snake_case__ : List[str]=0.999 , snake_case__ : int="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : int ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class lowerCAmelCase_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[int] ,A_ : Dict = 1000 ,A_ : List[Any] = "fixed_small_log" ,A_ : str = True ,A_ : Any = 1.0 ,A_ : Union[str, Any] = "epsilon" ,A_ : Dict = "squaredcos_cap_v2" ,) -> Union[str, Any]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
A = betas_for_alpha_bar(A_ )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
A = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A = 1.0
# setable values
A = None
A = torch.from_numpy(np.arange(0 ,A_ )[::-1].copy() )
A = variance_type
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : Dict = None ) -> Tuple:
return sample
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : str = None ) -> Optional[Any]:
A = num_inference_steps
A = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A = torch.from_numpy(A_ ).to(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int]=None ,A_ : str=None ,A_ : Union[str, Any]=None ) -> List[Any]:
if prev_timestep is None:
A = t - 1
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A = self.betas[t]
else:
A = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A = torch.log(torch.clamp(A_ ,min=1e-20 ) )
A = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A = variance.log()
A = beta.log()
A = (predicted_variance + 1) / 2
A = frac * max_log + (1 - frac) * min_log
return variance
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Any = None ,A_ : Any=None ,A_ : Dict = True ,) -> Optional[int]:
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A , A = torch.split(A_ ,sample.shape[1] ,dim=1 )
else:
A = None
# 1. compute alphas, betas
if prev_timestep is None:
A = t - 1
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A = self.betas[t]
A = self.alphas[t]
else:
A = 1 - alpha_prod_t / alpha_prod_t_prev
A = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A = torch.clamp(
A_ ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=A_ ,device=model_output.device )
A = self._get_variance(
A_ ,predicted_variance=A_ ,prev_timestep=A_ ,)
if self.variance_type == "fixed_small_log":
A = variance
elif self.variance_type == "learned_range":
A = (0.5 * variance).exp()
else:
raise ValueError(
F'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
' for the UnCLIPScheduler.' )
A = variance * variance_noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=A_ ,pred_original_sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Any ,A_ : Any ,A_ : Tuple ,) -> Any:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
A = timesteps.to(original_samples.device )
A = alphas_cumprod[timesteps] ** 0.5
A = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A = sqrt_alpha_prod.unsqueeze(-1 )
A = (1 - alphas_cumprod[timesteps]) ** 0.5
A = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 74 | """simple docstring"""
import operator
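# Strand sort: repeatedly peel an increasing "strand" off the input and merge
# it into the growing solution list.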
def _lowerCamelCase( a , a = False , a = None ):
__a = operator.lt if reverse else operator.gt
__a = solution or []
if not arr:
return solution
__a = [arr.pop(0 )]
for i, item in enumerate(a ):
if _operator(a , sublist[-1] ):
sublist.append(a )
arr.pop(a )
# merging sublist into solution list
if not solution:
solution.extend(a )
else:
while sublist:
__a = sublist.pop(0 )
for i, xx in enumerate(a ):
if not _operator(a , a ):
solution.insert(a , a )
break
else:
solution.append(a )
strand_sort(a , a , a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 261 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase : str = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = use_labels
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__a = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
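# The heart of create_and_check_decoder_model_past_large_inputs above is the
# cache-equivalence property. The same check in isolation, on a tiny random
# config (all sizes illustrative):
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

tiny_config = BertGenerationConfig(
    vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , is_decoder=True
)
tiny_model = BertGenerationDecoder(tiny_config ).eval()
input_ids = torch.randint(0 , tiny_config.vocab_size , (1, 7) )
past = tiny_model(input_ids , use_cache=True ).past_key_values
next_tokens = torch.randint(0 , tiny_config.vocab_size , (1, 3) )
# recomputing from scratch must match incremental decoding with the cache
full = tiny_model(torch.cat([input_ids, next_tokens] , dim=-1 ) ).logits[:, -3:]
incremental = tiny_model(next_tokens , past_key_values=past ).logits
assert torch.allclose(full , incremental , atol=1E-3 )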
| 261 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards ( kwargs : dict , expected : list ):
    """simple docstring"""
    out = _distribute_shards(**kwargs )
    assert out == expected
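# The cases above pin down the contract of _distribute_shards: at most
# max_num_jobs contiguous chunks, with the larger chunks coming first. A
# minimal reimplementation that satisfies the same expectations (a sketch,
# not the datasets library's actual code):
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    quotient, remainder = divmod(num_shards, num_jobs) if num_jobs else (0, 0)
    out, start = [], 0
    for i in range(num_jobs):
        size = quotient + (1 if i < remainder else 0)  # front-load the remainder
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards_sketch(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]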
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs ( gen_kwargs : dict , max_num_jobs : int , expected : list ):
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs ( gen_kwargs : dict , expected ):
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 331 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
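# Per the deprecation message above, downstream code should switch to the
# top-level import, e.g.:
#
#     from diffusers import MultiControlNetModel, StableDiffusionControlNetPipeline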
| 261 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module( module )-> None:
    """simple docstring"""
    for param in module.parameters():
        param.requires_grad = False
def get_device( )-> str:
    """simple docstring"""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device
def show_pil( img )-> None:
    """simple docstring"""
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( )-> str:
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S' )
    return timestamp
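# Quick exercise of the helpers above (a sketch; the Linear layer is a
# stand-in for whatever sub-module you want frozen):
import torch.nn as nn

device = get_device()
layer = nn.Linear(4 , 4 ).to(device )
freeze_module(layer )
assert all(not p.requires_grad for p in layer.parameters() )
print(f"[{get_timestamp()}] froze {sum(p.numel() for p in layer.parameters() )} params on {device}" )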
| 161 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def load_adapter( full_name , value , adapter , unused_weights ):
    name = full_name.split("adaptor." )[-1]
    items = name.split("." )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
    elif isinstance(layer_id , int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F"Adapter layer {layer_id} weight was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/" )[:-1] ),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 2_5_0_0_0_4
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
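    # Hypothetical smoke test of the exported directory (class names follow the
    # public transformers API that the anonymized imports above correspond to;
    # the silent waveform is a stand-in for real 16 kHz audio):
    from transformers import MBart50Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor
    exported = SpeechEncoderDecoderModel.from_pretrained(args.pytorch_dump_folder_path)
    exported_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(args.pytorch_dump_folder_path)
    exported_tokenizer = MBart50Tokenizer.from_pretrained(args.pytorch_dump_folder_path)
    waveform = torch.zeros(16_000)
    inputs = exported_feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
    generated = exported.generate(inputs.input_values)
    print(exported_tokenizer.batch_decode(generated, skip_special_tokens=True))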
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value ):
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None
class Tree :
    def __init__( self , tree ):
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self , node ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
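    # Example: iterating a Tree yields the sum of all node values via DFS.
    root = Node(1 )
    root.left = Node(2 )
    root.right = Node(3 )
    print(next(iter(Tree(root ) ) ) )  # 6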
| 93 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester :
def __init__(self : str , __a : Tuple , __a : Tuple=13 , __a : Dict=7 , __a : Any=True , __a : Tuple=True , __a : List[str]=True , __a : Optional[int]=True , __a : Optional[Any]=99 , __a : Optional[int]=32 , __a : Dict=5 , __a : Union[str, Any]=4 , __a : int=37 , __a : Union[str, Any]="gelu" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : int=512 , __a : Union[str, Any]=16 , __a : List[str]=2 , __a : Any=0.02 , __a : Dict=False , __a : List[Any]=True , __a : List[Any]="None" , __a : List[str]=3 , __a : Any=4 , __a : int=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = relative_attention
UpperCAmelCase_ = position_biased_input
UpperCAmelCase_ = pos_att_type
UpperCAmelCase_ = scope
def _lowercase (self : int ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : Dict ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _lowercase (self : List[str] , __a : str ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _lowercase (self : Any , __a : int , __a : List[Any] , __a : List[Any] , __a : str , __a : str , __a : Tuple , __a : Tuple ):
UpperCAmelCase_ = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a )[0]
UpperCAmelCase_ = model(__a , token_type_ids=__a )[0]
UpperCAmelCase_ = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _lowercase (self : int , __a : List[str] , __a : Any , __a : Any , __a : List[Any] , __a : Optional[int] , __a : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : List[str] , __a : Optional[Any] , __a : List[Any] , __a : Optional[Any] , __a : Dict , __a : List[Any] , __a : List[Any] , __a : Tuple ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def _lowercase (self : str , __a : List[Any] , __a : List[str] , __a : Any , __a : Optional[int] , __a : Any , __a : Optional[Any] , __a : Dict ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase (self : Tuple , __a : Dict , __a : int , __a : str , __a : Union[str, Any] , __a : List[str] , __a : Union[str, Any] , __a : Any ):
UpperCAmelCase_ = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase (self : int , __a : Dict , __a : Tuple , __a : Dict , __a : int , __a : Tuple , __a : Tuple , __a : Optional[Any] ):
UpperCAmelCase_ = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase (self : int ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
def _lowercase (self : str ):
UpperCAmelCase_ = DebertaVaModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 )
def _lowercase (self : Optional[int] ):
self.config_tester.run_common_tests()
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def _lowercase (self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def _lowercase (self : Union[str, Any] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _lowercase (self : List[str] ):
pass
@slow
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
UpperCAmelCase_ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase_ = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
UpperCAmelCase_ = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
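# The multiple-choice test above tiles a (batch, seq_len) batch once per answer
# choice before the forward pass; the same tiling in isolation (sizes illustrative):
import torch

batch_size , num_choices , seq_len = 2, 4, 8
input_ids = torch.randint(0 , 100 , (batch_size, seq_len) )
multiple_choice_ids = input_ids.unsqueeze(1 ).expand(-1 , num_choices , -1 ).contiguous()
assert multiple_choice_ids.shape == (batch_size, num_choices, seq_len)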
| 1 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log( folder_path ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params( params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"] )
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"] )
        params.global_rank = int(os.environ["RANK"] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["N_NODES"] )
        assert params.node_id == int(os.environ["NODE_RANK"] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
    logger.info(PREFIX + "Node ID        : %i" % params.node_id )
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank )
    logger.info(PREFIX + "World size     : %i" % params.world_size )
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node )
    logger.info(PREFIX + "Master         : %s" % str(params.is_master ) )
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node ) )
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu ) )
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed" )
        torch.distributed.init_process_group(
            init_method="env://" , backend="nccl" , )
def set_seed( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
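# Minimal illustration of the bookkeeping the multi-GPU branch above derives
# from the launcher-provided environment (values assume a 2-node x 4-GPU job):
import os

os.environ.update({"WORLD_SIZE": "8", "N_GPU_NODE": "4", "RANK": "5", "N_NODES": "2", "NODE_RANK": "1"} )
world_size = int(os.environ["WORLD_SIZE"] )
n_gpu_per_node = int(os.environ["N_GPU_NODE"] )
global_rank = int(os.environ["RANK"] )
print(world_size // n_gpu_per_node , global_rank // n_gpu_per_node )  # n_nodes=2, node_id=1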
| 261 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
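# Hypothetical end-to-end use of the segmentation classes exported above (the
# checkpoint name is the publicly documented CLIPSeg weight; the blank image is
# a stand-in for real input):
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined" )
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined" )
image = Image.new("RGB" , (352, 352) )
prompts = ["a cat", "a dog"]
inputs = processor(text=prompts , images=[image] * len(prompts ) , return_tensors="pt" , padding=True )
with torch.no_grad():
    logits = model(**inputs ).logits  # one mask logit map per prompt
print(logits.shape )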
| 200 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 261 | 0 |
'''simple docstring'''
def alternative_string_arrange( first_str: str , second_str: str ) -> str:
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
    print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 112 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix :
    def __init__( self , row , column , default_value = 0 ):
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ):
        s = F"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = F"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self ):
return str(self )
    def validate_indicies( self , loc ):
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc ):
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc , value ):
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another ):
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ):
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another ):
        return self + (-another)
    def __mul__( self , another ):
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F"Unsupported type given for another ({type(another )})"
            raise TypeError(msg )
    def transpose( self ):
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u , v ):
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa( ):
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"u is {u}" )
        print(F"v is {v}" )
        print(F"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )
    def test_doctest( ):
import doctest
doctest.testmod()
testa()
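    # Numeric sanity check of the Sherman-Morrison identity the method implements:
    # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # with `self` playing the role of A^(-1). Illustrative values only.
    identity = Matrix(3 , 3 , 0 )
    for i in range(3 ):
        identity[i, i] = 1  # A = I, hence A^(-1) = I
    u2 = Matrix(3 , 1 , 0 )
    v2 = Matrix(3 , 1 , 0 )
    u2[0, 0], v2[0, 0] = 1.0, 2.0
    updated_inverse = identity.sherman_morrison(u2 , v2 )
    # (A + u v^T) * result should come out as (numerically) the identity
    print((identity + u2 * v2.transpose()) * updated_inverse )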
| 261 | 0 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest (SchedulerCommonTest):
'''simple docstring'''
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ) -> Union[str, Any]:
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def _a ( self ) -> Dict:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def _a ( self ) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , _a )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_a , _a , _a , generator=_a )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : str = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , _a )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_a , _a , _a , generator=_a )
SCREAMING_SNAKE_CASE__ : Dict = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE__ : List[Any] = sample.to(_a )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(_a , _a )
SCREAMING_SNAKE_CASE__ : List[str] = model(_a , _a )
SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(_a , _a , _a , generator=_a )
SCREAMING_SNAKE_CASE__ : List[str] = output.prev_sample
SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE__ : List[str] = sample.to(_a )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.scale_model_input(_a , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.step(_a , _a , _a , generator=_a )
SCREAMING_SNAKE_CASE__ : List[str] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : Dict = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
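# The denoising loop the tests above exercise, in isolation; the zero "noise
# prediction" is a stand-in for a real UNet forward pass (shapes illustrative):
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1_000 )
scheduler.set_timesteps(10 )
generator = torch.manual_seed(0 )
sample = torch.randn(1 , 3 , 8 , 8 ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample , t )
    noise_pred = torch.zeros_like(model_input )  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred , t , sample , generator=generator ).prev_sample
print(sample.abs().mean() )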
| 132 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _lowerCamelCase( a , a , a , a , a=True , a="pt" ):
__a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , )
def _lowerCamelCase( a , a , a=None , ):
__a = input_ids.ne(a ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + ".source" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + ".target" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ):
return len(self.src_lens )
    def __getitem__( self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("\n" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("\n" )
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , "right" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , "right" )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        input_ids = torch.stack([x["input_ids"] for x in batch] )
        masks = torch.stack([x["attention_mask"] for x in batch] )
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
return batch
logger = getLogger(__name__)
def flatten_list( summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info( ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f , x ):
    return list(map(f , x ) )
def pickle_save( obj , path ):
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    def remove_articles(text ):
        return re.sub(R"\b(a|an|the)\b" , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
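# Quick check of the string metrics above on toy data:
pred = "The quick brown fox."
gold = "A quick brown fox"
assert normalize_answer(pred ) == "quick brown fox"  # lowercased, punctuation and articles stripped
assert exact_match_score(pred , gold )
print(f1_score("quick brown cat" , gold ) )  # 2 of 3 tokens overlap -> 0.666...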
| 261 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
def __init__( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]=1_3 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : str=6 , __UpperCAmelCase : Dict=1_7 , __UpperCAmelCase : Dict=2_3 , __UpperCAmelCase : List[Any]=1_1 , __UpperCAmelCase : Optional[Any]=True , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = act_dim
SCREAMING_SNAKE_CASE__ = state_dim
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = is_training
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
SCREAMING_SNAKE_CASE__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
SCREAMING_SNAKE_CASE__ = floats_tensor((self.batch_size, self.seq_length, 1) )
SCREAMING_SNAKE_CASE__ = floats_tensor((self.batch_size, self.seq_length, 1) )
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
SCREAMING_SNAKE_CASE__ = random_attention_mask((self.batch_size, self.seq_length) )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = DecisionTransformerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp( self ):
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_autoregressive_prediction( self ):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred , action_pred , return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state , reward , done , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
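# A minimal, framework-free sketch of the returns-to-go bookkeeping exercised above
# (an illustration, not the model itself): at each step the realized reward is
# subtracted from the outstanding target return, so the conditioning signal shrinks
# as reward is collected.
def _returns_to_go_schedule( target_return , rewards ):
    rtg = [float(target_return )]
    for r in rewards:
        rtg.append(rtg[-1] - r )
    return rtg


assert _returns_to_go_schedule(10 , [1.0, 1.0] ) == [10.0, 9.0, 8.0]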
| 165 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : "DiagonalGaussianDistribution"
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : Optional[Any] = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
__a = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
__a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 )
__a = False
__a = False
# only relevant if vae tiling is enabled
__a = self.config.sample_size
__a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__a = 0.25
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
__a = value
def a__ ( self , lowerCamelCase = True ):
__a = use_tiling
def a__ ( self ):
self.enable_tiling(lowerCamelCase )
def a__ ( self ):
__a = True
def a__ ( self ):
__a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ):
__a = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def a__ ( self , lowerCamelCase ):
__a = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
__a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase )
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[2] , b.shape[2] , lowerCamelCase )
for y in range(lowerCamelCase ):
__a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[3] , b.shape[3] , lowerCamelCase )
for x in range(lowerCamelCase ):
__a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_latent_min_size * self.tile_overlap_factor )
__a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__a = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
__a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_sample_min_size * self.tile_overlap_factor )
__a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__a = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
__a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ):
__a = sample
__a = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
__a = posterior.sample(generator=lowerCamelCase )
else:
__a = posterior.mode()
__a = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
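# A small NumPy sketch of the horizontal blend used by `blend_h` above: over the
# overlapping columns, the left tile fades out linearly while the right tile fades in,
# which removes visible seams between independently decoded tiles. Shapes and names
# here are illustrative only.
import numpy as np


def blend_h_sketch( left , right , blend_extent ):
    blend_extent = min(left.shape[-1] , right.shape[-1] , blend_extent )
    out = right.copy()
    for x in range(blend_extent ):
        out[..., x] = left[..., -blend_extent + x] * (1 - x / blend_extent ) + right[..., x] * (x / blend_extent )
    return out


left_tile = np.ones((1, 1, 4, 8) )
right_tile = np.zeros((1, 1, 4, 8) )
blended = blend_h_sketch(left_tile , right_tile , 4 )
assert blended[0, 0, 0, 0] == 1.0  # pure left at the start of the overlap
assert blended[0, 0, 0, 3] == 0.25  # mostly right by the end of the overlap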
| 261 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
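# Worked example of the `hidden_size` formula above: with the default embed_dim=96 and
# four stages (depths=[2, 2, 6, 2]), the channel dimension doubles at every stage
# transition, so the final stage has 96 * 2 ** 3 = 768 channels.
example_embed_dim , example_depths = 96 , [2, 2, 6, 2]
assert int(example_embed_dim * 2 ** (len(example_depths ) - 1) ) == 768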
| 196 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = kwargs.pop("padding_side" , "right" )
__a = kwargs.pop("return_attention_mask" , lowerCamelCase )
super().__init__(**lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__a = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
__a = processed_features[self.model_input_names[0]]
__a = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase ) == 0:
if return_attention_mask:
__a = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__a = required_input[0]
if isinstance(lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__a = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase ):
__a = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase ):
__a = "tf"
elif is_torch_tensor(lowerCamelCase ):
__a = "pt"
elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
__a = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(lowerCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__a = to_numpy(lowerCamelCase )
else:
__a = [to_numpy(lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__a = processed_features[self.model_input_names[0]]
__a = len(lowerCamelCase )
if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__a = []
for i in range(lowerCamelCase ):
__a = {k: v[i] for k, v in processed_features.items()}
# truncation
__a = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__a = PaddingStrategy.MAX_LENGTH
__a = {}
for i in range(lowerCamelCase ):
# padding
__a = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__a = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ):
__a = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__a = len(lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            __a = np.ones(len(lowerCamelCase ) , dtype=np.int32 )
if needs_to_be_padded:
__a = max_length - len(lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (0, difference) )
__a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__a = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = len(lowerCamelCase ) > max_length
if needs_to_be_truncated:
__a = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__a = processed_features["attention_mask"][:max_length]
return processed_features
def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase , lowerCamelCase ):
__a = PaddingStrategy(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = padding
else:
__a = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
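# A standalone NumPy sketch of the right-padding path above (illustrative, not the
# library code): the attention mask marks real frames with 1 and padded frames with 0,
# and for feature_size > 1 only the time axis is padded.
import numpy as np


def pad_right_sketch( values , max_length , padding_value=0.0 ):
    difference = max_length - len(values )
    attention_mask = np.pad(np.ones(len(values ) , dtype=np.int32 ) , (0, difference) )
    padded = np.pad(values , ((0, difference), (0, 0)) , "constant" , constant_values=padding_value )
    return padded , attention_mask


example_features = np.ones((3, 2) , dtype=np.float32 )  # 3 frames, feature_size 2
padded , mask = pad_right_sketch(example_features , max_length=5 )
assert padded.shape == (5, 2)
assert mask.tolist() == [1, 1, 1, 0, 0]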
| 261 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowercase = """\
Text data.
Second line of data."""
_lowercase = """file"""
@pytest.fixture(scope='session' )
def _snake_case ( snake_case__ : List[Any] ):
A = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
A = bytes(snake_case__ , 'utf-8' )
with zstd.open(snake_case__ , 'wb' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture
def _snake_case ( snake_case__ : List[str] ):
with open(os.path.join(tmpfs.local_root_dir , snake_case__ ) , 'w' ) as f:
f.write(snake_case__ )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] ):
A = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
A = input_paths[compression_format]
A = tmp_path / 'cache'
A = DownloadConfig(cache_dir=snake_case__ , extract_compressed_file=snake_case__ )
A = cached_path(snake_case__ , download_config=snake_case__ )
with open(snake_case__ ) as f:
A = f.read()
with open(snake_case__ ) as f:
A = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
A = 'custom_cache'
A = 'custom_extracted_dir'
A = tmp_path / 'custom_extracted_path'
if default_extracted:
A = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , snake_case__ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(snake_case__ ) )
A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A = xz_file
A = (
DownloadConfig(extract_compressed_file=snake_case__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=snake_case__ )
)
A = cached_path(snake_case__ , download_config=snake_case__ )
assert Path(snake_case__ ).parent.parts[-2:] == expected
def _snake_case ( snake_case__ : List[str] ):
# absolute path
A = str(Path(snake_case__ ).resolve() )
assert cached_path(snake_case__ ) == text_file
# relative path
A = str(Path(snake_case__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(snake_case__ ) == text_file
def _snake_case ( snake_case__ : List[Any] ):
# absolute path
A = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(snake_case__ ):
cached_path(snake_case__ )
# relative path
A = './__missing_file__.txt'
with pytest.raises(snake_case__ ):
cached_path(snake_case__ )
def _snake_case ( snake_case__ : Dict ):
A = get_from_cache(F'tmp://{tmpfs_file}' )
with open(snake_case__ ) as f:
A = f.read()
assert output_file_content == FILE_CONTENT
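# The offline-mode tests below all share one pattern: with the offline flag patched on,
# any helper that would touch the network must raise instead of connecting. A minimal
# sketch of such a guard (an illustration with made-up names, not the datasets
# implementation):
class _OfflineGuardErrorSketch(ConnectionError ):
    pass


def _guarded_fetch_sketch( url , offline=False ):
    if offline:
        raise _OfflineGuardErrorSketch(F"offline mode is enabled, refusing to fetch {url}" )
    return url


with pytest.raises(_OfflineGuardErrorSketch ):
    _guarded_fetch_sketch("https://huggingface.co" , offline=True )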
@patch('datasets.config.HF_DATASETS_OFFLINE' , snake_case__ )
def _snake_case ( ):
with pytest.raises(snake_case__ ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , snake_case__ )
def _snake_case ( snake_case__ : List[str] ):
A = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(snake_case__ ):
http_get('https://huggingface.co' , temp_file=snake_case__ )
with pytest.raises(snake_case__ ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , snake_case__ )
def _snake_case ( snake_case__ : Union[str, Any] ):
A = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(snake_case__ ):
ftp_get('ftp://huggingface.co' , temp_file=snake_case__ )
with pytest.raises(snake_case__ ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , snake_case__ )
def _snake_case ( snake_case__ : Union[str, Any] ):
A = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(snake_case__ ):
fsspec_get('s3://huggingface.co' , temp_file=snake_case__ )
with pytest.raises(snake_case__ ):
fsspec_head('s3://huggingface.co' ) | 74 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
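# A few quick checks of the two predicates above (both should agree): "taco cat" has
# every letter appearing an even number of times except one, while "python" has six
# characters with odd counts.
assert can_string_be_rearranged_as_palindrome_counter("taco cat" ) is True
assert can_string_be_rearranged_as_palindrome("taco cat" ) is True
assert can_string_be_rearranged_as_palindrome_counter("python" ) is False
assert can_string_be_rearranged_as_palindrome("python" ) is False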
| 261 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase ):
lowercase__ = CpmAntTokenizer
lowercase__ = False
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
super().setUp()
lowercase_ = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
@tooslow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""")
lowercase_ = """今天天气真好!"""
lowercase_ = ["""今天""", """天气""", """真""", """好""", """!"""]
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = """今天天气真好!"""
lowercase_ = [tokenizer.bos_token] + tokens
lowercase_ = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
lowercase_ = tokenizer.decode(lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
| 136 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
SCREAMING_SNAKE_CASE__:Any = random.Random()
if is_torch_available():
import torch
def _lowerCamelCase( a , a=1.0 , a=None , a=None ):
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ):
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = feature_size
__a = padding_value
__a = sampling_rate
__a = return_attention_mask
__a = do_normalize
def a__ ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ):
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
__a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : str = ASTFeatureExtractor
def a__ ( self ):
__a = ASTFeatureExtractionTester(self )
def a__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
__a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
__a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(lowerCamelCase )
__a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
__a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
@require_torch
def a__ ( self ):
import torch
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = np.random.rand(100 ).astype(np.floataa )
__a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def a__ ( self , lowerCamelCase ):
from datasets import load_dataset
__a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def a__ ( self ):
# fmt: off
__a = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
__a = self._load_datasamples(1 )
__a = ASTFeatureExtractor()
__a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
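# A short sketch of the kind of per-example standardization a `do_normalize` flag
# typically enables in such extractors (assumed behavior, shown for illustration):
# features are shifted and scaled to roughly zero mean and unit variance.
import numpy as np


def normalize_sketch( features , eps=1e-7 ):
    return (features - features.mean() ) / (features.std() + eps )


example_spec = np.random.RandomState(0 ).rand(1024 , 128 ).astype(np.float32 )
normed = normalize_sketch(example_spec )
assert abs(float(normed.mean() ) ) < 1e-3
assert abs(float(normed.std() ) - 1.0 ) < 1e-3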
| 261 | 0 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase :Optional[Any] = """scheduler_config.json"""
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
A_ : str = 1
A_ : Optional[int] = 2
A_ : Tuple = 3
A_ : Any = 4
A_ : Tuple = 5
A_ : Union[str, Any] = 6
A_ : Optional[int] = 7
A_ : Optional[Any] = 8
A_ : List[Any] = 9
A_ : Any = 10
A_ : Any = 11
A_ : Optional[int] = 12
A_ : Any = 13
A_ : Dict = 14
@dataclass
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
A_ : torch.FloatTensor
class _lowerCamelCase :
'''simple docstring'''
A_ : Tuple = SCHEDULER_CONFIG_NAME
A_ : str = []
A_ : List[str] = True
@classmethod
def __lowerCAmelCase ( cls : Dict , _A : Optional[Any] = None , _A : Optional[int] = None , _A : Optional[int]=False , **_A : Dict , ) -> List[str]:
__magic_name__ , __magic_name__ , __magic_name__ : Dict = cls.load_config(
pretrained_model_name_or_path=_A , subfolder=_A , return_unused_kwargs=_A , return_commit_hash=_A , **_A , )
return cls.from_config(_A , return_unused_kwargs=_A , **_A )
def __lowerCAmelCase ( self : Any , _A : int , _A : str = False , **_A : Dict ) -> Union[str, Any]:
self.save_config(save_directory=_A , push_to_hub=_A , **_A )
@property
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
return self._get_compatibles()
@classmethod
def __lowerCAmelCase ( cls : List[Any] ) -> Any:
__magic_name__ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
__magic_name__ : int = importlib.import_module(__name__.split('.' )[0] )
__magic_name__ : Tuple = [
getattr(_A , _A ) for c in compatible_classes_str if hasattr(_A , _A )
]
return compatible_classes | 331 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim: int = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )

    def to( self , torch_device: Optional[Union[str, torch.device]] = None , torch_dtype: Optional[torch.dtype] = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
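# A tiny roundtrip check of the scale/unscale pair above: standardizing an embedding
# with a mean and std and then inverting recovers the original values (plain tensors
# stand in for the module's learned parameters here).
example_mean = torch.zeros(1 , 4 ) + 0.5
example_std = torch.ones(1 , 4 ) * 2.0
example_embeds = torch.tensor([[1.0, 2.0, 3.0, 4.0]] )
scaled = (example_embeds - example_mean) * 1.0 / example_std
restored = (scaled * example_std) + example_mean
assert torch.allclose(restored , example_embeds )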
| 261 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a__ : Optional[Any] = False
class UpperCamelCase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :str ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__A = torch.manual_seed(0 )
__A = pipe.dual_guided(
prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A )
__A = VersatileDiffusionPipeline.from_pretrained(_A , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A = generator.manual_seed(0 )
__A = pipe.dual_guided(
prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowercase_ ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
__A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A = 'cyberpunk 2077'
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__A = torch.manual_seed(0 )
__A = pipe.dual_guided(
prompt=_A , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__A = 'A painting of a squirrel eating a burger '
__A = torch.manual_seed(0 )
__A = pipe.text_to_image(
prompt=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
__A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__A = pipe.image_variation(_A , generator=_A , output_type='numpy' ).images
__A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 161 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__:List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge( self , u , v , w ):
        self.dp[u][v] = w

    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
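    # Hand-checked expectations for the example graph above: the cheapest 1 -> 4 route
    # is 1 -> 3 -> 4 (5 + 6 = 11) and the cheapest 0 -> 3 route is 0 -> 2 -> 3 (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16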
| 93 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def _lowerCamelCase( a , *, a , a ):
__a = traverse_util.flatten_dict(variables["target"] )
__a = {"/".join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , a )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old["token_embedder/embedding"]
# Encoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
def _lowerCamelCase( a , a ):
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__a = state_dict["shared.weight"]
return state_dict
def _lowerCamelCase( a , a , a , a ):
__a = checkpoints.load_tax_checkpoint(a )
__a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__a = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def _lowerCamelCase( a , a , a , a = False ):
__a = TaConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(a )
else:
__a = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
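# A small NumPy illustration of why the conversion above transposes every kernel:
# T5X/Flax stores dense kernels as (in_features, out_features), while a PyTorch
# nn.Linear weight is (out_features, in_features), so `kernel.T` lines the axes up.
import numpy as np

in_features , out_features = 4 , 3
flax_kernel = np.arange(in_features * out_features ).reshape(in_features , out_features )
torch_weight = flax_kernel.T
assert torch_weight.shape == (out_features , in_features )
x = np.ones(in_features )
# y = x @ kernel in the Flax convention equals weight @ x in the PyTorch convention
assert np.allclose(x @ flax_kernel , torch_weight @ x )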
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve( limit: int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes


def solution( ceiling: int = 1_00_00_00 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = StableUnCLIPImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : List[Any] = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
        # Stable unCLIP will OOM when integration tests are run on a V100,
        # so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
        # Stable unCLIP will OOM when integration tests are run on a V100,
        # so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 261 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
A_ : List[Any] = """luke"""
def __init__( self , __snake_case=5_0267 , __snake_case=50_0000 , __snake_case=768 , __snake_case=256 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=True , __snake_case=None , __snake_case=1 , __snake_case=0 , __snake_case=2 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
_SCREAMING_SNAKE_CASE : Any = vocab_size
_SCREAMING_SNAKE_CASE : Any = entity_vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Tuple = entity_emb_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Dict = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Tuple = intermediate_size
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Tuple = initializer_range
_SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = use_entity_aware_attention
_SCREAMING_SNAKE_CASE : Any = classifier_dropout
| 200 | """simple docstring"""
import random
def _lowerCamelCase( a , a , a ):
__a = a[left_index]
__a = left_index + 1
for j in range(left_index + 1 , a ):
if a[j] < pivot:
__a , __a = a[i], a[j]
i += 1
__a , __a = a[i - 1], a[left_index]
return i - 1
def _lowerCamelCase( a , a , a ):
if left < right:
__a = random.randint(a , right - 1 )
        __a , __a = a[left], a[pivot]  # switches the pivot with the leftmost bound
__a = partition(a , a , a )
quick_sort_random(
a , a , a ) # recursive quicksort to the left of the pivot point
quick_sort_random(
a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point
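# Usage sketch (assuming the de-obfuscated names `partition` and `quick_sort_random`
# that the recursive calls above refer to; the data is illustrative):
#   data = [7, 1, 4, 1, 5, 9, 2, 6]
#   quick_sort_random(data, 0, len(data))  # sorts data[0:len(data)] in place
#   data -> [1, 1, 2, 4, 5, 6, 7, 9]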
def _lowerCamelCase( ):
__a = input("Enter numbers separated by a comma:\n" ).strip()
__a = [int(a ) for item in user_input.split("," )]
quick_sort_random(a , 0 , len(a ) )
print(a )
if __name__ == "__main__":
main()
| 261 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
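# How the "*" wildcard in MAPPING is resolved (illustrative example): a fairseq key
# such as "encoder.layers.3.self_attn.k_proj.weight" matches "self_attn.k_proj", the
# layer index "3" is extracted from the original name, and "*" is replaced with it to
# give "encoder.layers.3.attention.k_proj" before the weight is copied over.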
UpperCamelCase__ : List[str] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCAmelCase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: str , _lowerCamelCase: List[Any] , _lowerCamelCase: Union[str, Any] ):
for attribute in key.split(""".""" ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE : int = value
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowerCAmelCase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Tuple ):
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Optional[int] = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__SCREAMING_SNAKE_CASE : Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__SCREAMING_SNAKE_CASE : str = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE : Tuple = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
__SCREAMING_SNAKE_CASE : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE : str = """weight_g"""
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE : List[str] = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
__SCREAMING_SNAKE_CASE : Tuple = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__SCREAMING_SNAKE_CASE : Any = """weight"""
else:
__SCREAMING_SNAKE_CASE : List[str] = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: str ):
__SCREAMING_SNAKE_CASE : List[str] = full_name.split("""conv_layers.""" )[-1]
__SCREAMING_SNAKE_CASE : Tuple = name.split(""".""" )
__SCREAMING_SNAKE_CASE : Any = int(items[0] )
__SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__SCREAMING_SNAKE_CASE : Optional[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__SCREAMING_SNAKE_CASE : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCAmelCase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Dict , _lowerCamelCase: Dict=None ):
# load the pre-trained checkpoints
__SCREAMING_SNAKE_CASE : str = torch.load(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = WavLMConfigOrig(checkpoint["""cfg"""] )
__SCREAMING_SNAKE_CASE : List[Any] = WavLMOrig(_lowerCamelCase )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
__SCREAMING_SNAKE_CASE : int = WavLMConfig.from_pretrained(_lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : List[str] = WavLMConfig()
__SCREAMING_SNAKE_CASE : int = WavLMModel(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase )
hf_wavlm.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCamelCase__ : Any = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 112 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _lowerCamelCase( a ):
return getitem, k
def _lowerCamelCase( a , a ):
return setitem, k, v
def _lowerCamelCase( a ):
return delitem, k
def _lowerCamelCase( a , a , *a ):
try:
return fun(a , *a ), None
except Exception as e:
return None, e
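# e.g. _run_operation(d, *_set("key", "val")) returns (result, None) on success and
# (None, exception) on failure, so each operation below can be replayed against both
# HashMap and a plain dict and the outcomes compared even when the operation raises.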
SCREAMING_SNAKE_CASE__:List[Any] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
SCREAMING_SNAKE_CASE__:Any = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
SCREAMING_SNAKE_CASE__:int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE__:Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _lowerCamelCase( a ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(a ):
__a , __a = _run_operation(a , a , *a )
__a , __a = _run_operation(a , a , *a )
assert my_res == py_res
assert str(a ) == str(a )
assert set(a ) == set(a )
assert len(a ) == len(a )
assert set(my.items() ) == set(py.items() )
def _lowerCamelCase( ):
def is_public(a ) -> bool:
return not name.startswith("_" )
__a = {name for name in dir({} ) if is_public(a )}
__a = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
| 261 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :str = logging.get_logger(__name__)
a :Any = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __a (snake_case_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :int = """mvp"""
_SCREAMING_SNAKE_CASE :Any = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE :int = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , _a=50_267 , _a=1_024 , _a=12 , _a=4_096 , _a=16 , _a=12 , _a=4_096 , _a=16 , _a=0.0 , _a=0.0 , _a="gelu" , _a=1_024 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=0.0 , _a=False , _a=True , _a=1 , _a=0 , _a=2 , _a=True , _a=2 , _a=2 , _a=False , _a=100 , _a=800 , **_a , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = d_model
SCREAMING_SNAKE_CASE__ : int = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Tuple = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_function
SCREAMING_SNAKE_CASE__ : List[Any] = init_std
SCREAMING_SNAKE_CASE__ : List[str] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[Any] = classifier_dropout
SCREAMING_SNAKE_CASE__ : Any = use_cache
SCREAMING_SNAKE_CASE__ : Dict = encoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : Tuple = use_prompt
SCREAMING_SNAKE_CASE__ : Optional[int] = prompt_length
SCREAMING_SNAKE_CASE__ : List[Any] = prompt_mid_dim
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _a ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 132 | """simple docstring"""
import copy
import re
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = param_name.split("_" )
__a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fall back
        # to a separated short name
__a = ["", "_"]
for separator in separators:
__a = separator.join(lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__a = shortname
__a = param_name
return shortname
return param_name
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase )
__a = short_name
__a = param_name
@classmethod
def a__ ( cls ):
if cls.NAMING_INFO is not None:
return
__a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase , lowerCamelCase )
__a = info
@classmethod
def a__ ( cls , lowerCamelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1 if v else 0
__a = "" if isinstance(lowerCamelCase , (int, float) ) else "-"
__a = F"{key}{sep}{v}"
name.append(lowerCamelCase )
return "_".join(lowerCamelCase )
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a = []
else:
__a = repr.split("_" )
__a = {}
for value in values:
if "-" in value:
__a , __a = value.split("-" )
else:
__a = re.sub("[0-9.]" , "" , lowerCamelCase )
__a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) )
__a = cls.NAMING_INFO["reverse_short_param"][p_k]
__a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a = cls.DEFAULTS[k]
return parameters
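# Usage sketch (hypothetical subclass; `shortname`/`parse_repr` are the assumed
# original names of the obfuscated classmethods above):
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#   # defaults (batch_size) are omitted, so the name encodes only learning_rate,
#   # e.g. something like "run_lr0.0001"
#   RunNamer.parse_repr(name)  # round-trips back to the full parameter dict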
| 261 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : List[Any] = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Optional[int] = """upernet"""
def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = backbone_config.get("model_type" )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(lowerCamelCase )
__a = backbone_config
__a = hidden_size
__a = initializer_range
__a = pool_scales
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = auxiliary_in_channels
__a = auxiliary_channels
__a = auxiliary_num_convs
__a = auxiliary_concat_input
__a = loss_ignore_index
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
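# Usage sketch (assuming this is transformers' `UperNetConfig`; the model class below
# comes from the transformers library and is not defined in this file):
#   config = UperNetConfig()  # backbone_config=None -> default ResNet backbone, as logged above
#   model = UperNetForSemanticSegmentation(config)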
| 261 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__lowerCAmelCase = None
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
__lowerCAmelCase = {
"""camembert-base""": 5_12,
}
__lowerCAmelCase = """▁"""
class __a ( snake_case_ ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Tuple = ["""input_ids""", """attention_mask"""]
__lowercase : Any = CamembertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCAmelCase__ , ) -> str:
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
lowercase__: Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: str = vocab_file
lowercase__: Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Dict:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: List[Any] = [self.cls_token_id]
lowercase__: str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
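    # Resulting special-token layout (RoBERTa/CamemBERT convention):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>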
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> int:
'''simple docstring'''
lowercase__: Optional[int] = [self.sep_token_id]
lowercase__: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: Union[str, Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
| 196 | """simple docstring"""
def _lowerCamelCase( a = 1_0_0_0 ):
__a = 3
__a = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
a += 1
return result
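# O(1) cross-check via inclusion-exclusion (my addition, assuming the same problem:
# multiples of 3 or 5 below the limit). The sum of multiples of k below n is
# k * m * (m + 1) // 2 with m = (n - 1) // k; multiples of 15 would otherwise be
# counted twice, so they are subtracted once.
def _closed_form(n: int = 1_0_0_0) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return s(3) + s(5) - s(15)  # equals 233168 for n = 1000, matching solution()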
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 0 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def _snake_case ( snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[str] ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , snake_case__ )
A = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A = dataset_size < in_memory_max_size
else:
A = False
A = is_small_dataset(snake_case__ )
    assert result == expected
| 74 | """simple docstring"""
import operator
def _lowerCamelCase( a , a = False , a = None ):
__a = operator.lt if reverse else operator.gt
__a = solution or []
if not arr:
return solution
__a = [arr.pop(0 )]
for i, item in enumerate(a ):
if _operator(a , sublist[-1] ):
sublist.append(a )
arr.pop(a )
# merging sublist into solution list
if not solution:
solution.extend(a )
else:
while sublist:
__a = sublist.pop(0 )
for i, xx in enumerate(a ):
if not _operator(a , a ):
solution.insert(a , a )
break
else:
solution.append(a )
strand_sort(a , a , a )
return solution
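# Trace for strand_sort([4, 3, 5, 1, 2]) (illustrative): the first increasing strand
# pulled out is [4, 5], leaving [3, 1, 2]; each strand is merged into `solution`, and
# the function recurses on the remainder until the input is empty.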
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 261 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase : List[Any] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
UpperCAmelCase : List[Any] = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
'''simple docstring'''
lowercase_ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowercase_ = bs[:]
lowercase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCAmelCase )
cs.append(2**8 + n )
n += 1
lowercase_ = [chr(__lowerCAmelCase ) for n in cs]
return dict(zip(__lowerCAmelCase , __lowerCAmelCase ) )
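# e.g. printable bytes keep their own character, while excluded bytes are shifted by
# 256: byte 0x20 (space) maps to chr(256 + 32) = "Ġ", which is why BPE vocab entries
# use "Ġ" to mark a leading space.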
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = set()
lowercase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ = char
return pairs
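# Illustration (my example, assuming the function above is the `get_pairs` referenced
# from `bpe` below):
#   get_pairs(("l", "o", "w", "e", "r")) -> {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}
# on each BPE iteration, the adjacent pair with the lowest merge rank is merged first.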
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str]="replace" , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : int="</s>" , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Any="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Any="<mask>" , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : Optional[int] , ):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else bos_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else eos_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else sep_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cls_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else unk_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding="""utf-8""") as vocab_handle:
lowercase_ = json.load(lowerCAmelCase_)
lowercase_ = {v: k for k, v in self.encoder.items()}
lowercase_ = errors # how to handle errors in decoding
lowercase_ = bytes_to_unicode()
lowercase_ = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""") as merges_handle:
lowercase_ = merges_handle.read().split("""\n""")[1:-1]
lowercase_ = [tuple(merge.split()) for merge in bpe_merges]
lowercase_ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
lowercase_ = {}
lowercase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return len(self.encoder)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase_ = tuple(lowerCAmelCase_)
lowercase_ = get_pairs(lowerCAmelCase_)
if not pairs:
return token
while True:
lowercase_ = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_: self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""")))
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ = bigram
lowercase_ = []
lowercase_ = 0
while i < len(lowerCAmelCase_):
try:
lowercase_ = word.index(lowerCAmelCase_ , lowerCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
lowercase_ = j
if word[i] == first and i < len(lowerCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase_ = tuple(lowerCAmelCase_)
lowercase_ = new_word
if len(lowerCAmelCase_) == 1:
break
else:
lowercase_ = get_pairs(lowerCAmelCase_)
lowercase_ = """ """.join(lowerCAmelCase_)
lowercase_ = word
return word
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = []
for token in re.findall(self.pat , lowerCAmelCase_):
lowercase_ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_).split(""" """))
return bpe_tokens
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str]):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = """""".join(lowerCAmelCase_)
lowercase_ = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""" , errors=self.errors)
return text
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] = None):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_) + """\n""")
lowercase_ = 0
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_: kv[1]):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""")
lowercase_ = token_index
writer.write(""" """.join(lowerCAmelCase_) + """\n""")
index += 1
return vocab_file, merge_file
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ = [self.cls_token_id]
lowercase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] = None , lowerCAmelCase_ : List[Any] = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_)) + [1]
return [1] + ([0] * len(lowerCAmelCase_)) + [1, 1] + ([0] * len(lowerCAmelCase_)) + [1]
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=False , **lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_) > 0 and not text[0].isspace()):
lowercase_ = """ """ + text
return (text, kwargs)
| 136 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = use_labels
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
        __a , __a , __a , __a = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to next_input_ids and the attention mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Union[str, Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
        __a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs_for_decoder()
__a = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 261 | 0 |
'''simple docstring'''
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Tuple = 0
for i in range(1 , 1001 ):
total += i**i
return str(lowerCAmelCase )[-10:]
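# Equivalent sketch (my addition, assuming the task is the last ten digits of
# sum(i**i)): pow(i, i, mod) keeps every intermediate value below 10**10.
def _solution_mod(limit: int = 1000) -> str:
    mod = 10**10
    total = sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod
    return str(total).zfill(10)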
if __name__ == "__main__":
    print(solution())
| 331 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : Union[str, Any] = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__:Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
__a = hf_model.adapter
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(a , a , a , a )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(F"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
                logger.info(F"Adapter proj layer norm weight was initialized from {full_name}.")
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(F"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(F"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
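# Example invocation (hypothetical paths, shown only as a usage sketch):
# python convert_mbart_wav2vec2_checkpoint.py \
#     --checkpoint_path ./wav2vec2_mbart.pt \
#     --dict_path ./dict.mbart50.txt \
#     --config_yaml_path ./config.yaml \
#     --pytorch_dump_folder_path ./hf_model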
| 261 | 0 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (P = S * pf) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (Q = S * sqrt(1 - pf^2)) from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * math.sqrt(1 - power_factor**2)
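# Worked example: a 100 VA load at power factor 0.9 gives
# real_power(100, 0.9) == 90.0 W and reactive_power(100, 0.9) ~= 43.59 VAR.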
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
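# Heavier, torch-dependent entries are appended to _import_structure below only
# when torch is available; _LazyModule then defers the actual imports until use.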
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] ="""Input must be a string of 8 numbers plus letter"""
SCREAMING_SNAKE_CASE_: Any ="""TRWAGMYFPDXBNJZSQVHLCKE"""
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> List[str]:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = f"""Expected string as input, found {type(snake_case_ ).__name__}"""
raise TypeError(snake_case_ )
UpperCAmelCase_ = spanish_id.replace("-" , "" ).upper()
if len(snake_case_ ) != 9:
raise ValueError(snake_case_ )
try:
UpperCAmelCase_ = int(spanish_id_clean[0:8] )
UpperCAmelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case_ ) from ex
if letter.isdigit():
raise ValueError(snake_case_ )
return letter == LOOKUP_LETTERS[number % 23]
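# Example: is_spain_national_id("12345678Z") is True, since 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == "Z".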
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info of the current git repo into folder_path/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup, filling in the distributed fields of params."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = F"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Set numpy, torch and CUDA seeds for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
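# Note (added): numpy / torch seeding alone does not make CUDA kernels fully
# deterministic; torch.backends.cudnn.deterministic = True would also be needed.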
| 261 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
A_ : Tuple = """deformable_detr"""
A_ : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __snake_case=True , __snake_case=None , __snake_case=3 , __snake_case=300 , __snake_case=1024 , __snake_case=6 , __snake_case=1024 , __snake_case=8 , __snake_case=6 , __snake_case=1024 , __snake_case=8 , __snake_case=0.0 , __snake_case=True , __snake_case="relu" , __snake_case=256 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1.0 , __snake_case=True , __snake_case=False , __snake_case="sine" , __snake_case="resnet50" , __snake_case=True , __snake_case=False , __snake_case=4 , __snake_case=4 , __snake_case=4 , __snake_case=False , __snake_case=300 , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=1 , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , __snake_case=0.25 , __snake_case=False , **__snake_case , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = backbone_config.get("""model_type""" )
_SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAPPING[backbone_model_type]
_SCREAMING_SNAKE_CASE : Optional[Any] = config_class.from_dict(__snake_case )
_SCREAMING_SNAKE_CASE : Any = use_timm_backbone
_SCREAMING_SNAKE_CASE : int = backbone_config
_SCREAMING_SNAKE_CASE : Any = num_channels
_SCREAMING_SNAKE_CASE : Dict = num_queries
_SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[Any] = d_model
_SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
_SCREAMING_SNAKE_CASE : str = encoder_layers
_SCREAMING_SNAKE_CASE : str = encoder_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = decoder_ffn_dim
_SCREAMING_SNAKE_CASE : List[Any] = decoder_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = decoder_attention_heads
_SCREAMING_SNAKE_CASE : Dict = dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
_SCREAMING_SNAKE_CASE : Dict = activation_dropout
_SCREAMING_SNAKE_CASE : str = activation_function
_SCREAMING_SNAKE_CASE : List[str] = init_std
_SCREAMING_SNAKE_CASE : Optional[Any] = init_xavier_std
_SCREAMING_SNAKE_CASE : List[str] = encoder_layerdrop
_SCREAMING_SNAKE_CASE : List[Any] = auxiliary_loss
_SCREAMING_SNAKE_CASE : Tuple = position_embedding_type
_SCREAMING_SNAKE_CASE : Any = backbone
_SCREAMING_SNAKE_CASE : List[str] = use_pretrained_backbone
_SCREAMING_SNAKE_CASE : Optional[int] = dilation
# deformable attributes
_SCREAMING_SNAKE_CASE : int = num_feature_levels
_SCREAMING_SNAKE_CASE : Dict = encoder_n_points
_SCREAMING_SNAKE_CASE : List[str] = decoder_n_points
_SCREAMING_SNAKE_CASE : List[str] = two_stage
_SCREAMING_SNAKE_CASE : List[Any] = two_stage_num_proposals
_SCREAMING_SNAKE_CASE : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
_SCREAMING_SNAKE_CASE : Optional[int] = class_cost
_SCREAMING_SNAKE_CASE : Any = bbox_cost
_SCREAMING_SNAKE_CASE : int = giou_cost
# Loss coefficients
_SCREAMING_SNAKE_CASE : List[str] = mask_loss_coefficient
_SCREAMING_SNAKE_CASE : int = dice_loss_coefficient
_SCREAMING_SNAKE_CASE : Dict = bbox_loss_coefficient
_SCREAMING_SNAKE_CASE : str = giou_loss_coefficient
_SCREAMING_SNAKE_CASE : int = eos_coefficient
_SCREAMING_SNAKE_CASE : List[Any] = focal_alpha
_SCREAMING_SNAKE_CASE : List[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
def UpperCAmelCase_ ( self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
return self.d_model
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.backbone_config.to_dict()
_SCREAMING_SNAKE_CASE : Optional[int] = self.__class__.model_type
return output
| 200 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 261 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCamelCase ( snake_case_ ):
'''simple docstring'''
_A : Union[str, Any] = """yolos"""
def __init__( self : Any , lowerCAmelCase__ : str=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : int=1E-12 , lowerCAmelCase__ : Union[str, Any]=[5_1_2, 8_6_4] , lowerCAmelCase__ : Optional[Any]=1_6 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[str]=1_0_0 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Tuple=5 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Dict=0.1 , **lowerCAmelCase__ : int , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : str = hidden_act
__SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Any = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[int] = image_size
__SCREAMING_SNAKE_CASE : List[Any] = patch_size
__SCREAMING_SNAKE_CASE : Any = num_channels
__SCREAMING_SNAKE_CASE : Tuple = qkv_bias
__SCREAMING_SNAKE_CASE : Dict = num_detection_tokens
__SCREAMING_SNAKE_CASE : Optional[Any] = use_mid_position_embeddings
__SCREAMING_SNAKE_CASE : str = auxiliary_loss
# Hungarian matcher
__SCREAMING_SNAKE_CASE : Dict = class_cost
__SCREAMING_SNAKE_CASE : str = bbox_cost
__SCREAMING_SNAKE_CASE : int = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE : List[str] = giou_loss_coefficient
__SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
class _UpperCamelCase ( snake_case_ ):
'''simple docstring'''
_A : Optional[Any] = version.parse('''1.11''' )
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
@property
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
return 1_2 | 112 | """simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ):
__a , __a = row, column
__a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )]
def __str__( self ):
__a = F"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
__a = 0
for row_vector in self.array:
for obj in row_vector:
__a = max(lowerCamelCase , len(str(lowerCamelCase ) ) )
__a = F"%{max_element_length}s"
# Make string and return
def single_line(lowerCamelCase ) -> str:
nonlocal string_format_identifier
__a = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def a__ ( self , lowerCamelCase ):
if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , lowerCamelCase ):
assert self.validate_indicies(lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , lowerCamelCase , lowerCamelCase ):
assert self.validate_indicies(lowerCamelCase )
__a = value
def __add__( self , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] + another[r, c]
return result
def __neg__( self ):
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = -self[r, c]
return result
def __sub__( self , lowerCamelCase ):
return self + (-another)
def __mul__( self , lowerCamelCase ):
if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] * another
return result
elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__a = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__a = F"Unsupported type given for another ({type(lowerCamelCase )})"
raise TypeError(lowerCamelCase )
def a__ ( self ):
__a = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c]
return result
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
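        # Sherman-Morrison formula, with `self` playing the role of A^(-1):
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)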
__a = v.transpose()
__a = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _lowerCamelCase( ):
# a^(-1)
__a = Matrix(3 , 3 , 0 )
for i in range(3 ):
__a = 1
print(F"a^(-1) is {ainv}" )
# u, v
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 1, 2, -3
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(a , a )}" )
def _lowerCamelCase( ):
import doctest
doctest.testmod()
testa()
| 261 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
a :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 132 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _lowerCamelCase( a , a , a , a , a=True , a="pt" ):
__a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
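# Worked example: trim_batch(torch.tensor([[1, 2, 0], [3, 0, 0]]), pad_token_id=0)
# keeps only the first two columns, since column 2 is all padding.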
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ):
super().__init__()
__a = Path(lowerCamelCase ).joinpath(type_path + ".source" )
__a = Path(lowerCamelCase ).joinpath(type_path + ".target" )
__a = self.get_char_lens(self.src_file )
__a = max_source_length
__a = max_target_length
assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
__a = tokenizer
__a = prefix
if n_obs is not None:
__a = self.src_lens[:n_obs]
__a = src_lang
__a = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , lowerCamelCase ):
__a = index + 1 # linecache starts at 1
__a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" )
__a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" )
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__a = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer
)
__a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer
__a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" )
__a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" )
__a = source_inputs["input_ids"].squeeze()
__a = target_inputs["input_ids"].squeeze()
__a = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def a__ ( lowerCamelCase ):
return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()]
def a__ ( self , lowerCamelCase ):
__a = torch.stack([x["input_ids"] for x in batch] )
__a = torch.stack([x["attention_mask"] for x in batch] )
__a = torch.stack([x["decoder_input_ids"] for x in batch] )
__a = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase )
else self.tokenizer.pad_token_id
)
__a = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase )
else self.tokenizer.pad_token_id
)
__a = trim_batch(lowerCamelCase , lowerCamelCase )
__a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase )
__a = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path):
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f, x):
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 261 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : "DiagonalGaussianDistribution"
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : Optional[Any] = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
__a = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
__a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 )
__a = False
__a = False
# only relevant if vae tiling is enabled
__a = self.config.sample_size
__a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__a = 0.25
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
__a = value
def a__ ( self , lowerCamelCase = True ):
__a = use_tiling
def a__ ( self ):
self.enable_tiling(lowerCamelCase )
def a__ ( self ):
__a = True
def a__ ( self ):
__a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ):
__a = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def a__ ( self , lowerCamelCase ):
__a = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
__a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase )
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[2] , b.shape[2] , lowerCamelCase )
for y in range(lowerCamelCase ):
__a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[3] , b.shape[3] , lowerCamelCase )
for x in range(lowerCamelCase ):
__a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_latent_min_size * self.tile_overlap_factor )
__a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__a = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
__a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_sample_min_size * self.tile_overlap_factor )
__a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__a = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
__a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ):
__a = sample
__a = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
__a = posterior.sample(generator=lowerCamelCase )
else:
__a = posterior.mode()
__a = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
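# Note (added): tiled_encode / tiled_decode above split large inputs into
# overlapping tiles and linearly blend tile borders (blend_v / blend_h) so that
# no visible seams appear, trading a small amount of exactness for memory.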
| 261 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
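# Only input_ids and ratio_char_token (characters per token, a rough measure of
# how well the tokenizer compresses source code) survive into the pushed dataset.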
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 196 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = kwargs.pop("padding_side" , "right" )
__a = kwargs.pop("return_attention_mask" , lowerCamelCase )
super().__init__(**lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__a = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
__a = processed_features[self.model_input_names[0]]
__a = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase ) == 0:
if return_attention_mask:
__a = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__a = required_input[0]
if isinstance(lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__a = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase ):
__a = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase ):
__a = "tf"
elif is_torch_tensor(lowerCamelCase ):
__a = "pt"
elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
__a = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(lowerCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__a = to_numpy(lowerCamelCase )
else:
__a = [to_numpy(lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__a = processed_features[self.model_input_names[0]]
__a = len(lowerCamelCase )
if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__a = []
for i in range(lowerCamelCase ):
__a = {k: v[i] for k, v in processed_features.items()}
# truncation
__a = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__a = PaddingStrategy.MAX_LENGTH
__a = {}
for i in range(lowerCamelCase ):
# padding
__a = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__a = []
if value.dtype is np.dtype(np.floataa ):
__a = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ):
__a = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__a = len(lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__a = np.ones(len(lowerCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
__a = max_length - len(lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (0, difference) )
__a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__a = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = len(lowerCamelCase ) > max_length
if needs_to_be_truncated:
__a = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__a = processed_features["attention_mask"][:max_length]
return processed_features
def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase , lowerCamelCase ):
__a = PaddingStrategy(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = padding
else:
__a = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
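# Note (added): padding resolution above maps padding=True to LONGEST, a string or
# enum to the named strategy, and False to DO_NOT_PAD; MAX_LENGTH additionally
# requires both max_length and a configured padding_value.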
| 261 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print('Making key files...')
    make_key_files('rsa', 1024)
    print('Key files generation successful.')


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as out_file:
        out_file.write(f'{key_size},{public_key[0]},{public_key[1]}')

    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as out_file:
        out_file.write(f'{key_size},{private_key[0]},{private_key[1]}')
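# Each key file stores a single line "key_size,n,e" (public) or "key_size,n,d"
# (private), which a matching RSA cipher script can parse back.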
if __name__ == "__main__":
main() | 74 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
check_str = input(
    """Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 0 |
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    '''Return True if n is a perfect cube, using a floating-point cube root.'''
    val = n ** (1 / 3)
    return (val * val * val) == n
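# Caveat (added): the floating-point cube root can misclassify large integers;
# an integer-based check (e.g. round(n ** (1 / 3)) ** 3 == n) is more robust.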
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 136 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ):
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = feature_size
__a = padding_value
__a = sampling_rate
__a = return_attention_mask
__a = do_normalize
def a__ ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ):
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
__a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 261 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase :Optional[Any] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ["""YolosFeatureExtractor"""]
lowerCAmelCase :Tuple = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case__ ( snake_case_, snake_case_ ):
@register_to_config
def __init__( self , lowerCamelCase = 768 , ):
super().__init__()
__a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) )
__a = nn.Parameter(torch.ones(1 , lowerCamelCase ) )
def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ):
__a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) )
__a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) )
return self
def a__ ( self , lowerCamelCase ):
__a = (embeds - self.mean) * 1.0 / self.std
return embeds
def a__ ( self , lowerCamelCase ):
__a = (embeds * self.std) + self.mean
return embeds
| 261 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase__ :
def __init__( self :Any , _A :int , _A :Dict=13 , _A :Union[str, Any]=7 , _A :Any=True , _A :List[Any]=True , _A :Any=True , _A :Optional[Any]=True , _A :Optional[int]=99 , _A :Union[str, Any]=64 , _A :Union[str, Any]=5 , _A :Union[str, Any]=4 , _A :Tuple=37 , _A :Dict="gelu" , _A :Any=0.1 , _A :List[Any]=0.1 , _A :Dict=512 , _A :Tuple=16 , _A :List[str]=2 , _A :Optional[int]=0.02 , _A :Optional[Any]=3 , _A :int=4 , _A :Optional[int]=None , ) -> Union[str, Any]:
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = vocab_size - 1
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self :Any ) -> Any:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase_ ( self :List[str] ) -> List[str]:
'''simple docstring'''
__A , __A , __A , __A = self.prepare_config_and_inputs()
__A = True
return config, input_ids, input_mask, token_labels
def lowercase_ ( self :Any , _A :Optional[int] , _A :Dict , _A :List[str] ) -> List[Any]:
'''simple docstring'''
__A = GPTNeoXModel(config=_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A )
__A = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self :Any , _A :Any , _A :Union[str, Any] , _A :Optional[Any] ) -> Tuple:
'''simple docstring'''
__A = True
__A = GPTNeoXModel(_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self :Dict , _A :Tuple , _A :str , _A :List[str] , _A :str ) -> Union[str, Any]:
'''simple docstring'''
__A = GPTNeoXForCausalLM(config=_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self :Dict , _A :List[str] , _A :Dict , _A :Any , _A :Optional[int] ) -> Dict:
'''simple docstring'''
__A = self.num_labels
__A = GPTNeoXForQuestionAnswering(_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self :List[Any] , _A :int , _A :Union[str, Any] , _A :int , _A :Dict ) -> Tuple:
'''simple docstring'''
__A = self.num_labels
__A = GPTNeoXForSequenceClassification(_A )
model.to(_A )
model.eval()
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self :Optional[Any] , _A :int , _A :Optional[int] , _A :Dict , _A :Any ) -> Optional[Any]:
'''simple docstring'''
__A = self.num_labels
__A = GPTNeoXForTokenClassification(_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self :Any , _A :Optional[Any] , _A :Union[str, Any] , _A :Union[str, Any] ) -> int:
'''simple docstring'''
__A = True
__A = GPTNeoXForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A = model(_A , attention_mask=_A , use_cache=_A )
__A = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
__A = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask
__A = torch.cat([input_ids, next_tokens] , dim=-1 )
__A = torch.cat([input_mask, next_mask] , dim=-1 )
__A = model(_A , attention_mask=_A , output_hidden_states=_A )
__A = output_from_no_past['hidden_states'][0]
__A = model(
_A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A = output_from_no_past[:, -3:, random_slice_idx].detach()
__A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3 ) )
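        # (Reusing the cache must reproduce the no-cache hidden states for the newly
        # generated positions; a mismatch would indicate stale or misaligned past
        # key/value tensors.)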
def lowercase_ ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase):
UpperCAmelCase__ : int = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : str = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Any = False
def lowercase_ ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
__A = GPTNeoXModelTester(self )
__A = ConfigTester(self , config_class=_A , hidden_size=64 , num_attention_heads=8 )
def lowercase_ ( self :int ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self :Dict ) -> Any:
'''simple docstring'''
__A , __A , __A , __A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A )
def lowercase_ ( self :List[Any] ) -> Tuple:
'''simple docstring'''
__A , __A , __A , __A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A )
def lowercase_ ( self :Tuple ) -> Any:
'''simple docstring'''
__A , __A , __A , __A = self.model_tester.prepare_config_and_inputs_for_decoder()
__A = None
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A )
def lowercase_ ( self :Optional[int] ) -> str:
'''simple docstring'''
__A , __A , __A , __A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_A , _A , _A )
def lowercase_ ( self :int ) -> Optional[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_A )
def lowercase_ ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def lowercase_ ( self :Optional[int] ) -> str:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def lowercase_ ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def lowercase_ ( self :int ) -> int:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowercase_ ( self :List[str] , _A :Tuple ) -> Union[str, Any]:
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = ids_tensor([1, 10] , config.vocab_size )
__A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A = GPTNeoXModel(_A )
original_model.to(_A )
original_model.eval()
__A = original_model(_A ).last_hidden_state
__A = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A = {'type': scaling_type, 'factor': 10.0}
__A = GPTNeoXModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__A = scaled_model(_A ).last_hidden_state
__A = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1E-5 ) )
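        # (Background, hedged: "linear" scaling divides RoPE positions by `factor` for every
        # input, while "dynamic" NTK-style scaling only adjusts once the sequence exceeds the
        # original max_position_embeddings; that is why the short-input equality check holds.)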
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
@slow
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__A = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
__A = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_A )
__A = tokenizer('My favorite food is' , return_tensors='pt' ).to(_A )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__A = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
__A = model.generate(**_A , do_sample=_A , max_new_tokens=20 )
__A = tokenizer.batch_decode(_A )[0]
self.assertEqual(_A , _A )
| 161 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__:List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_lowercase : Optional[Any] = logging.getLogger(__name__)
_lowercase : List[str] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_lowercase : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase_ = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowerCAmelCase_ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _snake_case ( self ):
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase_ = field(default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowerCAmelCase_ = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowerCAmelCase_ = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
lowerCAmelCase_ = field(
default=snake_case_ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _snake_case ( self ):
"""simple docstring"""
if self.train_file is not None:
lowercase_ : Dict = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowercase_ : List[str] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data (one JSON line per example) to the dataset."""
    with open(ref_file, '''r''' , encoding='''utf-8''' ) as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['''chinese_ref'''] = refs
    return Dataset.from_dict(dataset_dict)
def main():
"""simple docstring"""
lowercase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ : Tuple = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase_ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __SCREAMING_SNAKE_CASE )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase_ : Optional[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowercase_ : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
lowercase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
lowercase_ : Tuple = {}
if data_args.train_file is not None:
lowercase_ : Tuple = data_args.train_file
if data_args.validation_file is not None:
lowercase_ : Optional[int] = data_args.validation_file
lowercase_ : Union[str, Any] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
lowercase_ : List[str] = '''text'''
lowercase_ : Tuple = load_dataset(__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ : List[str] = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase_ : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name , **__SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
lowercase_ : Optional[int] = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowercase_ : Any = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__SCREAMING_SNAKE_CASE )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
lowercase_ : Dict = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowercase_ : List[str] = AutoModelForMaskedLM.from_config(__SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(__SCREAMING_SNAKE_CASE ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
        column_names = datasets['''train'''].column_names
    else:
        column_names = datasets['''validation'''].column_names
    text_column_name = '''text''' if '''text''' in column_names else column_names[0]

    padding = '''max_length''' if data_args.pad_to_max_length else False
    def tokenize_function(examples):
        # Remove empty lines
        examples['''text'''] = [line for line in examples['''text'''] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['''text'''] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
lowercase_ : str = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowercase_ : List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowercase_ : Tuple = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing those columns
lowercase_ : int = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowercase_ : Optional[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowercase_ : List[Any] = DataCollatorForWholeWordMask(tokenizer=__SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
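    # (Whole-word masking masks every sub-token of a chosen word together, e.g.
    # ["un", "##afford", "##able"] is masked as one unit rather than piecewise.)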
# Initialize our Trainer
lowercase_ : Union[str, Any] = Trainer(
model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase_ : str = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowercase_ : Optional[Any] = model_args.model_name_or_path
else:
lowercase_ : Optional[Any] = None
lowercase_ : Any = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE )
trainer.save_model() # Saves the tokenizer too for easy upload
lowercase_ : List[str] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
lowercase_ : str = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase_ : Any = trainer.evaluate()
lowercase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
lowercase_ : int = perplexity
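        # Note: perplexity is exp(mean cross-entropy eval loss); e.g. an eval loss of 2.0
        # corresponds to a perplexity of about 7.39.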
lowercase_ : Dict = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 93 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def _lowerCamelCase( a , *, a , a ):
__a = traverse_util.flatten_dict(variables["target"] )
__a = {"/".join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , a )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old["token_embedder/embedding"]
# Encoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
def _lowerCamelCase( a , a ):
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__a = state_dict["shared.weight"]
return state_dict
def _lowerCamelCase( a , a , a , a ):
__a = checkpoints.load_tax_checkpoint(a )
__a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__a = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def _lowerCamelCase( a , a , a , a = False ):
__a = TaConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(a )
else:
__a = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 261 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
SCREAMING_SNAKE_CASE_: int ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_: List[Any] ={
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE_: Tuple ={
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
SCREAMING_SNAKE_CASE_: List[str] ={
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
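# Usage sketch (hedged): the fast tokenizer pairs sequences as [CLS] A [SEP] B [SEP],
# with token_type_ids 0 over the first segment and 1 over the second; e.g. a 2-token A
# and a 3-token B yield [0, 0, 0, 0, 1, 1, 1, 1].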
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = StableUnCLIPImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : List[Any] = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 261 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class OneStepUnetPipeline(DiffusionPipeline):
    """Minimal pipeline that runs the UNet and the scheduler for a single step."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
return result
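# Usage sketch (the names `unet` and `scheduler` are assumed to be configured
# diffusers modules, e.g. a UNet2DModel and a DDPMScheduler):
#   pipe = OneStepUnetPipeline(unet=unet, scheduler=scheduler)
#   out = pipe()  # a tensor of ones with the scheduler output's shape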
| 200 | """simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)
if __name__ == "__main__":
main()
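# Example usage (non-interactive sketch): quick_sort_random sorts the half-open
# range [left, right) in place.
#   data = [5, 2, 9, 1, 7]
#   quick_sort_random(data, 0, len(data))
#   print(data)  # [1, 2, 5, 7, 9]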
| 261 | 0 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( _lowerCamelCase: Any , _lowerCamelCase: Tuple=None ):
__SCREAMING_SNAKE_CASE : Tuple = None
if token is not None:
__SCREAMING_SNAKE_CASE : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
__SCREAMING_SNAKE_CASE : Tuple = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
__SCREAMING_SNAKE_CASE : Optional[Any] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = requests.get(url + F"&page={i + 2}" , headers=_lowerCamelCase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCAmelCase_ ( _lowerCamelCase: str , _lowerCamelCase: Dict=None ):
__SCREAMING_SNAKE_CASE : str = None
if token is not None:
__SCREAMING_SNAKE_CASE : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
__SCREAMING_SNAKE_CASE : str = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
__SCREAMING_SNAKE_CASE : Tuple = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
__SCREAMING_SNAKE_CASE : Optional[int] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
__SCREAMING_SNAKE_CASE : Dict = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = requests.get(url + F"&page={i + 2}" , headers=_lowerCamelCase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCAmelCase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: str , _lowerCamelCase: Optional[int] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if token is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
__SCREAMING_SNAKE_CASE : List[str] = requests.get(_lowerCamelCase , headers=_lowerCamelCase , allow_redirects=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = result.headers["""Location"""]
__SCREAMING_SNAKE_CASE : Tuple = requests.get(_lowerCamelCase , allow_redirects=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowerCamelCase , F"{artifact_name}.zip" )
with open(_lowerCamelCase , """wb""" ) as fp:
fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
UpperCamelCase__ : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase__ : Tuple = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase__ : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase__ : str = k.find(''' / ''')
UpperCamelCase__ : Dict = k[index + len(''' / ''') :]
UpperCamelCase__ : Union[str, Any] = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase__ : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase__ : Optional[int] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase__ : Optional[int] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : List[str] = reduce_by_error(errors)
UpperCamelCase__ : List[Any] = reduce_by_model(errors)
UpperCamelCase__ : int = make_github_table(reduced_by_error)
UpperCamelCase__ : List[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa) | 112 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
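# The two tests above use a plain dict as the reference implementation: every
# operation sequence must leave HashMap and dict observably identical, and the
# HashMap must not expose public names a dict lacks. Run them with pytest, e.g.
# `pytest -q test_hash_map.py` (file name illustrative).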
| 261 | 0 |
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)  # pick a random pivot index in [left, right)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
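# Quick sanity check (a minimal sketch): the sort works in place over the
# half-open range [left, right), so passing 0 and len(a) sorts the whole list.
_sample = [4, 3, 5, 1, 2]
quick_sort_random(_sample, 0, len(_sample))
assert _sample == [1, 2, 3, 4, 5]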
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
| 132 | """simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
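# A minimal usage sketch, assuming the class behaves like the transformers
# `hp_naming` helper it mirrors: a subclass pins PREFIX and DEFAULTS, shortname()
# compresses a param dict into a run name, and parse_repr() inverts it.
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#   assert RunNamer.parse_repr(name)["batch_size"] == 32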
| 261 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    '''simple docstring'''
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
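# Typical invocation through the transformers CLI entry point (model name and
# cache path illustrative):
#   transformers-cli download --cache-dir /tmp/hf-cache bert-base-uncased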
| 165 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
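# Minimal usage sketch: with no arguments the config auto-initializes a ResNet
# backbone, as the logger message above notes.
#   config = UperNetConfig()
#   assert config.backbone_config.model_type == "resnet"
#   assert config.to_dict()["model_type"] == "upernet"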
| 261 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
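# Padding arithmetic note (illustrative numbers): with pad_size=8 an image of
# height 17 is padded to 24, since (17 // 8 + 1) * 8 == 24; a height that is
# already a multiple of 8 (say 16) still gains a full extra block, because
# (16 // 8 + 1) * 8 == 24.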
| 196 | """simple docstring"""
def solution(n: int = 1000) -> int:
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
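    # Spot check on a small input: the multiples of 3 or 5 below 10 are 3, 5, 6, 9.
    assert solution(10) == 23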
| 261 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    '''simple docstring'''

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    '''simple docstring'''

    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    '''simple docstring'''

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs

| 74 | """simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
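# Strand sort repeatedly peels an already-ordered "strand" off the input and
# merges it into the solution list; in the worst case this costs O(n^2)
# comparisons, while an already-sorted input needs only a single pass.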
| 261 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
UpperCAmelCase : int = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
UpperCAmelCase : List[Any] = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , bootstrap_aggregation=__lowerCAmelCase , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , bootstrap_aggregation=__lowerCAmelCase , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = """rougeLsum"""
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=[k] )[k]
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
lowercase_ = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=__lowerCAmelCase )
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=__lowerCAmelCase )
assert score_sep == score_no_sep
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
lowercase_ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase ) == calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE () -> List[str]:
'''simple docstring'''
lowercase_ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , rouge_keys=["""rougeLsum"""] , newline_sep=__lowerCAmelCase )["""rougeLsum"""]
lowercase_ = calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _SCREAMING_SNAKE_CASE () -> Any:
'''simple docstring'''
lowercase_ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
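# Taken together, these tests pin down the contract of `calculate_rouge` from the
# accompanying seq2seq `utils` module: aggregated calls return one score object
# per requested rouge key, while `bootstrap_aggregation=False` yields
# per-example scores that can be loaded into a DataFrame, as above.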
| 136 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = use_labels
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
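# The integration tests above are decorated with @slow and download the Hub
# checkpoint google/bert_for_seq_generation_L-24_bbc_encoder; in the transformers
# test suite such tests only run when the RUN_SLOW=1 environment variable is set.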
| 261 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'do_center_crop' ) )
self.assertTrue(hasattr(_A , 'center_crop' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_convert_rgb' ) )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
__magic_name__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
__magic_name__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Any = self.image_processor_tester.prepare_inputs(equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__magic_name__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__magic_name__ : Optional[Any] = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
# Initialize image_processing
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__magic_name__ : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__magic_name__ : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__magic_name__ : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'do_center_crop' ) )
self.assertTrue(hasattr(_A , 'center_crop' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_convert_rgb' ) )
def __lowerCAmelCase ( self : Tuple ) -> int:
pass
def __lowerCAmelCase ( self : Tuple ) -> Dict:
# Initialize image_processing
__magic_name__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : str = self.image_processor_tester.prepare_inputs(equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__magic_name__ : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__magic_name__ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
            ) , )

| 331 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" )-> Tuple:
"""simple docstring"""
__A = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
__A = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
__A = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
__A = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """simple docstring"""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> int:
"""simple docstring"""
__A = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__A = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__A = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__A = state_dict['shared.weight']
return state_dict
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> Optional[Any]:
"""simple docstring"""
__A = checkpoints.load_tax_checkpoint(UpperCAmelCase )
__A = convert_tax_to_pytorch(UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase )
__A = make_state_dict(UpperCAmelCase , UpperCAmelCase )
model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False )-> Dict:
"""simple docstring"""
__A = TaConfig.from_json_file(UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__A = TaEncoderModel(UpperCAmelCase )
else:
__A = TaForConditionalGeneration(UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase )
print('Done' )
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
a__ : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 161 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__:Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a ):
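    # Walks the dotted key down the HF module tree, checks the checkpoint tensor's shape, and records it
    # under the matching slot (weight / weight_g / weight_v / bias).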
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a ):
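    # Routes each fairseq parameter to its HF counterpart: conv feature-extractor and adapter weights use
    # dedicated loaders, everything else goes through the MAPPING table.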
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
__a = hf_model.adapter
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(a , a , a , a )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
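    # type_id 0 carries the conv weight/bias, type_id 2 the (layer or group) norm parameters; anything else is collected as unused.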
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a , a , a , a ):
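    # Splits adapter weights into the projection and its layer norm (names without "adaptor") and the numbered conv adapter layers.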
__a = full_name.split("adaptor." )[-1]
__a = name.split("." )
if items[1].isdigit():
__a = int(items[1] )
else:
__a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                __a = value
                logger.info(F"Adapter proj layer norm weight was initialized from {full_name}." )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a , a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a ):
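    # Reuses an embedding matrix as the weight of a Linear output projection.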
__a , __a = emb.weight.shape
__a = nn.Linear(a , a , bias=a )
__a = emb.weight.data
return lin_layer
@torch.no_grad()
def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ):
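    # Assembles a SpeechEncoderDecoderModel: wav2vec2 encoder weights come from the fairseq checkpoint,
    # the mBART decoder is initialized from its own state dict, and config/tokenizer/feature extractor are saved together.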
__a = WavaVecaConfig.from_pretrained(
a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , )
__a = MBartConfig.from_pretrained(a )
# load model
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__a = model[0].eval()
# load feature extractor
__a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a )
# set weights for wav2vec2 encoder
__a = WavaVecaModel(a )
recursively_load_weights_wavaveca(model.encoder , a )
# load decoder weights
__a = MBartForCausalLM(a )
__a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
__a = SpeechEncoderDecoderModel(encoder=a , decoder=a )
__a = False
__a = MBartaaTokenizer(a )
tokenizer.save_pretrained(a )
__a = hf_wavavec.config.to_dict()
__a = tokenizer.pad_token_id
__a = tokenizer.bos_token_id
__a = tokenizer.eos_token_id
__a = "mbart50"
__a = "wav2vec2"
__a = tokenizer.eos_token_id
__a = 2_5_0_0_0_4
__a = tokenizer.eos_token_id
__a = SpeechEncoderDecoderConfig.from_dict(a )
hf_wavavec.save_pretrained(a )
feature_extractor.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 261 | 0 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def snake_case_ ( ):
"""simple docstring"""
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:str = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Tuple = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Dict =logging.get_logger(__name__)
_UpperCAmelCase : Any ={
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = """audio-spectrogram-transformer"""
def __init__( self , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=1_6 , __lowercase=True , __lowercase=1_0 , __lowercase=1_0 , __lowercase=1_0_2_4 , __lowercase=1_2_8 , **__lowercase , ) -> List[str]:
super().__init__(**__lowercase )
lowerCAmelCase_ : int = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : Dict = qkv_bias
lowerCAmelCase_ : Optional[int] = frequency_stride
lowerCAmelCase_ : str = time_stride
lowerCAmelCase_ : Optional[Any] = max_length
        lowerCAmelCase_ : Optional[Any] = num_mel_bins
| 262 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_UpperCAmelCase : Optional[Any] =NewType("""DataClass""", Any)
_UpperCAmelCase : Dict =NewType("""DataClassType""", Any)
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
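    # Parses common truthy/falsy strings from the CLI; values that are already bool pass through unchanged.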
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def lowerCAmelCase ( lowerCAmelCase_ )-> Callable[[str], Any]:
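    # argparse applies the type callable to raw strings, so map each choice's string form back to the original (possibly Enum) value.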
lowerCAmelCase_ : str = {str(lowerCAmelCase_ ): choice for choice in choices}
return lambda lowerCAmelCase_ : str_to_choice.get(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( *,
lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = dataclasses.MISSING , lowerCAmelCase_ = dataclasses.MISSING , lowerCAmelCase_ = None , **lowerCAmelCase_ , )-> dataclasses.Field:
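    # dataclasses.field wrapper that stashes argparse aliases and help text in the field metadata.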
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCAmelCase_ : Dict = {}
if aliases is not None:
lowerCAmelCase_ : str = aliases
if help is not None:
lowerCAmelCase_ : Tuple = help
return dataclasses.field(metadata=lowerCAmelCase_ , default=lowerCAmelCase_ , default_factory=lowerCAmelCase_ , **lowerCAmelCase_ )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Iterable[DataClassType]
def __init__( self , __lowercase , **__lowercase ) -> List[str]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCAmelCase_ : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**__lowercase )
if dataclasses.is_dataclass(__lowercase ):
lowerCAmelCase_ : Union[str, Any] = [dataclass_types]
lowerCAmelCase_ : List[Any] = list(__lowercase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowercase )
@staticmethod
def lowercase_ ( __lowercase , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = f"""--{field.name}"""
lowerCAmelCase_ : Tuple = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowercase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
lowerCAmelCase_ : List[str] = kwargs.pop('''aliases''' , [] )
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Optional[Any] = [aliases]
lowerCAmelCase_ : Any = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__lowercase , '''UnionType''' ) and isinstance(__lowercase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__lowercase ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase_ : List[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase_ : Dict = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase_ : str = (
field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase_ : List[Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase_ : Dict = {}
if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
if origin_type is Literal:
lowerCAmelCase_ : Optional[Any] = field.type.__args__
else:
lowerCAmelCase_ : int = [x.value for x in field.type]
lowerCAmelCase_ : str = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : str = field.default
else:
lowerCAmelCase_ : Tuple = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase_ : Tuple = copy(__lowercase )
# Hack because type=bool in argparse does not behave as we want.
lowerCAmelCase_ : Dict = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase_ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase_ : List[str] = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase_ : int = '''?'''
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase_ : List[Any] = True
elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
lowerCAmelCase_ : Union[str, Any] = field.type.__args__[0]
lowerCAmelCase_ : Dict = '''+'''
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase_ : Optional[int] = True
else:
lowerCAmelCase_ : List[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : List[Any] = field.default_factory()
else:
lowerCAmelCase_ : int = True
parser.add_argument(__lowercase , *__lowercase , **__lowercase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase_ : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__lowercase )
def lowercase_ ( self , __lowercase ) -> List[Any]:
if hasattr(__lowercase , '''_argument_group_name''' ):
lowerCAmelCase_ : str = self.add_argument_group(dtype._argument_group_name )
else:
lowerCAmelCase_ : Dict = self
try:
lowerCAmelCase_ : Dict[str, type] = get_type_hints(__lowercase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(__lowercase ):
lowerCAmelCase_ : Any = '''.'''.join(map(__lowercase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__lowercase ):
if not field.init:
continue
lowerCAmelCase_ : Optional[int] = type_hints[field.name]
self._parse_dataclass_field(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=None , __lowercase=None , ) -> Tuple[DataClass, ...]:
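        # Merges args from optional .args files with sys.argv (command line wins), then splits the parsed
        # namespace into one dataclass instance per registered type.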
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCAmelCase_ : str = []
if args_filename:
args_files.append(Path(__lowercase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCAmelCase_ : str = ArgumentParser()
args_file_parser.add_argument(__lowercase , type=__lowercase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = args_file_parser.parse_known_args(args=__lowercase )
lowerCAmelCase_ : int = vars(__lowercase ).get(args_file_flag.lstrip('''-''' ) , __lowercase )
if cmd_args_file_paths:
args_files.extend([Path(__lowercase ) for p in cmd_args_file_paths] )
lowerCAmelCase_ : Dict = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCAmelCase_ : Any = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.parse_known_args(args=__lowercase )
lowerCAmelCase_ : Any = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : str = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
lowerCAmelCase_ : str = {k: v for k, v in vars(__lowercase ).items() if k in keys}
for k in keys:
delattr(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[int] = dtype(**__lowercase )
outputs.append(__lowercase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__lowercase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
lowerCAmelCase_ : int = set(args.keys() )
lowerCAmelCase_ : str = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : int = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
lowerCAmelCase_ : List[str] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCAmelCase_ : List[str] = dtype(**__lowercase )
outputs.append(__lowercase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__lowercase )}""" )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
with open(Path(__lowercase ) , encoding='''utf-8''' ) as open_json_file:
lowerCAmelCase_ : Dict = json.loads(open_json_file.read() )
lowerCAmelCase_ : str = self.parse_dict(__lowercase , allow_extra_keys=__lowercase )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
lowerCAmelCase_ : Optional[Any] = self.parse_dict(yaml.safe_load(Path(__lowercase ).read_text() ) , allow_extra_keys=__lowercase )
        return tuple(__lowercase )
| 262 | 1 |
from datetime import datetime
import requests
def lowerCAmelCase ( lowerCAmelCase_ )-> bytes:
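    # Resolves the direct video URL through the downloadgram API, then downloads the raw bytes.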
lowerCAmelCase_ : Optional[Any] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
lowerCAmelCase_ : Union[str, Any] = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowerCAmelCase_ ).content
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] =input("""Enter Video/IGTV url: """).strip()
_UpperCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""") | 262 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase ( lowerCAmelCase_ )-> Union[str, Any]:
return EnvironmentCommand()
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
def lowercase_ ( __lowercase ) -> List[Any]:
lowerCAmelCase_ : List[str] = parser.add_parser('''env''' )
download_parser.set_defaults(func=__lowercase )
def lowercase_ ( self ) -> int:
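        # Collects versions of diffusers' core dependencies and prints them in a GitHub-issue-friendly block.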
lowerCAmelCase_ : Optional[Any] = huggingface_hub.__version__
lowerCAmelCase_ : str = '''not installed'''
lowerCAmelCase_ : str = '''NA'''
if is_torch_available():
import torch
lowerCAmelCase_ : Any = torch.__version__
lowerCAmelCase_ : str = torch.cuda.is_available()
lowerCAmelCase_ : List[str] = '''not installed'''
if is_transformers_available():
import transformers
lowerCAmelCase_ : Any = transformers.__version__
lowerCAmelCase_ : Optional[Any] = '''not installed'''
if is_accelerate_available():
import accelerate
lowerCAmelCase_ : List[Any] = accelerate.__version__
lowerCAmelCase_ : List[str] = '''not installed'''
if is_xformers_available():
import xformers
lowerCAmelCase_ : Optional[Any] = xformers.__version__
lowerCAmelCase_ : int = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(__lowercase ) )
return info
@staticmethod
def lowercase_ ( __lowercase ) -> str:
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n" | 262 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = DiTPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
SCREAMING_SNAKE_CASE__ : Any = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def lowercase_ ( self ) -> List[str]:
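        # Builds a tiny transformer/VAE/scheduler stack so the pipeline test runs quickly on CPU.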
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = TransformeraDModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowercase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_0_0_0 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=__lowercase , )
lowerCAmelCase_ : Optional[int] = AutoencoderKL()
lowerCAmelCase_ : List[Any] = DDIMScheduler()
lowerCAmelCase_ : Union[str, Any] = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def lowercase_ ( self , __lowercase , __lowercase=0 ) -> Union[str, Any]:
if str(__lowercase ).startswith('''mps''' ):
lowerCAmelCase_ : Optional[Any] = torch.manual_seed(__lowercase )
else:
lowerCAmelCase_ : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
lowerCAmelCase_ : int = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Any = '''cpu'''
lowerCAmelCase_ : int = self.get_dummy_components()
lowerCAmelCase_ : Tuple = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : str = self.get_dummy_inputs(__lowercase )
lowerCAmelCase_ : int = pipe(**__lowercase ).images
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
lowerCAmelCase_ : Any = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
lowerCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowercase , 1e-3 )
def lowercase_ ( self ) -> int:
self._test_inference_batch_single_identical(relax_max_difference=__lowercase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Dict = torch.manual_seed(0 )
lowerCAmelCase_ : str = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
lowerCAmelCase_ : Optional[Any] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
lowerCAmelCase_ : str = pipe.get_label_ids(__lowercase )
lowerCAmelCase_ : Union[str, Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=4_0 , output_type='''np''' ).images
for word, image in zip(__lowercase , __lowercase ):
lowerCAmelCase_ : Dict = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
lowerCAmelCase_ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
lowerCAmelCase_ : Any = ['''vase''', '''umbrella''']
lowerCAmelCase_ : List[Any] = pipe.get_label_ids(__lowercase )
lowerCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=2_5 , output_type='''np''' ).images
for word, image in zip(__lowercase , __lowercase ):
lowerCAmelCase_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
f"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1e-1
| 262 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = JukeboxTokenizer
SCREAMING_SNAKE_CASE__ : int = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def lowercase_ ( self ) -> Union[str, Any]:
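        # Tokenizes the artist/genres/lyrics metadata and compares against golden token ids for the 1b-lyrics checkpoint.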
import torch
lowerCAmelCase_ : Union[str, Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
lowerCAmelCase_ : Any = tokenizer(**self.metas )['''input_ids''']
# fmt: off
lowerCAmelCase_ : List[str] = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowercase_ ( self ) -> List[Any]:
import torch
lowerCAmelCase_ : Any = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
lowerCAmelCase_ : str = tokenizer(**self.metas )['''input_ids''']
# fmt: off
lowerCAmelCase_ : Tuple = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 262 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = """markuplm"""
def __init__( self , __lowercase=3_0_5_2_2 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=2 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=0 , __lowercase=0 , __lowercase=2 , __lowercase=2_5_6 , __lowercase=1_0_2_4 , __lowercase=2_1_6 , __lowercase=1_0_0_1 , __lowercase=3_2 , __lowercase=5_0 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ) -> str:
super().__init__(
pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase , )
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : int = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : List[Any] = max_position_embeddings
lowerCAmelCase_ : Any = type_vocab_size
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : int = position_embedding_type
lowerCAmelCase_ : Any = use_cache
lowerCAmelCase_ : List[Any] = classifier_dropout
# additional properties
lowerCAmelCase_ : List[Any] = max_depth
lowerCAmelCase_ : Union[str, Any] = max_xpath_tag_unit_embeddings
lowerCAmelCase_ : Union[str, Any] = max_xpath_subs_unit_embeddings
lowerCAmelCase_ : Any = tag_pad_id
lowerCAmelCase_ : List[str] = subs_pad_id
        lowerCAmelCase_ : Dict = xpath_unit_hidden_size
| 262 |
from __future__ import annotations
import requests
def lowerCAmelCase ( lowerCAmelCase_ )-> dict:
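    # Fetches one story's metadata from the official Hacker News Firebase API.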
lowerCAmelCase_ : List[Any] = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(lowerCAmelCase_ ).json()
def lowerCAmelCase ( lowerCAmelCase_ = 10 )-> list[dict]:
lowerCAmelCase_ : List[Any] = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
lowerCAmelCase_ : Tuple = requests.get(lowerCAmelCase_ ).json()[:max_stories]
return [get_hackernews_story(lowerCAmelCase_ ) for story_id in story_ids]
def lowerCAmelCase ( lowerCAmelCase_ = 10 )-> str:
lowerCAmelCase_ : Optional[Any] = hackernews_top_stories(lowerCAmelCase_ )
return "\n".join('''* [{title}]({url})'''.format(**lowerCAmelCase_ ) for story in stories )
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 262 | 1 |
from __future__ import annotations
_UpperCAmelCase : Union[str, Any] =list[tuple[int, int]]
_UpperCAmelCase : Any =[
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free cells, 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCAmelCase : Optional[Any] =([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = pos_x
lowerCAmelCase_ : int = pos_y
lowerCAmelCase_ : Tuple = (pos_y, pos_x)
lowerCAmelCase_ : Union[str, Any] = goal_x
lowerCAmelCase_ : Dict = goal_y
lowerCAmelCase_ : List[Any] = g_cost
lowerCAmelCase_ : Tuple = parent
lowerCAmelCase_ : Dict = self.calculate_heuristic()
def lowercase_ ( self ) -> float:
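        # Manhattan distance from this node to the goal.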
lowerCAmelCase_ : Dict = abs(self.pos_x - self.goal_x )
lowerCAmelCase_ : Any = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , __lowercase ) -> bool:
return self.f_cost < other.f_cost
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Dict = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowercase )
lowerCAmelCase_ : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , __lowercase )
lowerCAmelCase_ : Union[str, Any] = [self.start]
lowerCAmelCase_ : list[Node] = []
lowerCAmelCase_ : List[str] = False
def lowercase_ ( self ) -> Path | None:
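        # Greedy best-first search: always expand the open node with the smallest heuristic (Node.__lt__ sorts on f_cost).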
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
lowerCAmelCase_ : Any = True
return self.retrace_path(__lowercase )
self.closed_nodes.append(__lowercase )
lowerCAmelCase_ : Dict = self.get_successors(__lowercase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowercase )
else:
# retrieve the best current path
lowerCAmelCase_ : int = self.open_nodes.pop(self.open_nodes.index(__lowercase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowercase )
else:
self.open_nodes.append(__lowercase )
if not self.reached:
return [self.start.pos]
return None
def lowercase_ ( self , __lowercase ) -> list[Node]:
lowerCAmelCase_ : Union[str, Any] = []
for action in delta:
lowerCAmelCase_ : List[str] = parent.pos_x + action[1]
lowerCAmelCase_ : str = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowercase , __lowercase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowercase , ) )
return successors
def lowercase_ ( self , __lowercase ) -> Path:
lowerCAmelCase_ : Optional[Any] = node
lowerCAmelCase_ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase_ : List[Any] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_UpperCAmelCase : List[Any] =(0, 0)
_UpperCAmelCase : str =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
_UpperCAmelCase : Tuple =GreedyBestFirst(init, goal)
_UpperCAmelCase : Optional[int] =greedy_bf.search()
if path:
for pos_x, pos_y in path:
_UpperCAmelCase : Dict =2
for elem in grid:
        print(elem)
| 262 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[str] =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_UpperCAmelCase : Optional[int] =25_0004
_UpperCAmelCase : Tuple =25_0020
@require_sentencepiece
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MBartTokenizer
SCREAMING_SNAKE_CASE__ : Dict = MBartTokenizerFast
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : List[str] = True
def lowercase_ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : str = MBartTokenizer(__lowercase , keep_accents=__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = MBartTokenizer(__lowercase , keep_accents=__lowercase )
lowerCAmelCase_ : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(
__lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowercase_ ( self ) -> Dict:
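        # Round-trips fast and slow tokenizers through save_pretrained/from_pretrained, in both legacy and tokenizer.json formats.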
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
    self.tokenizers_list = [(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})]
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
            # Checks it save with the same files + the tokenizer.json file for the fast one
            self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
            tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
            self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp , key ) )
            # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
            # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
            shutil.rmtree(tmpdirname2 )
            # Save tokenizer rust, legacy_format=True
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
            # Checks it save with the same files
            self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp , key ) )
            shutil.rmtree(tmpdirname2 )
            # Save tokenizer rust, legacy_format=False
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
            # Checks it saved the tokenizer.json file
            self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp , key ) )
            shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
    @classmethod
    def setUpClass ( cls ) -> None:
        cls.tokenizer : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
        cls.pad_token_id = 1
        return cls
def lowercase_ ( self ) -> Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
def lowercase_ ( self ) -> Tuple:
    ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
    self.assertListEqual(self.expected_src_tokens , ids )
def lowercase_ ( self ) -> Any:
    self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
    generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
    result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
    expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
    self.assertEqual(result , expected_romanian )
    self.assertNotIn(self.tokenizer.eos_token , result )
def lowercase_ ( self ) -> Any:
    src_text = ['''this is gunna be a long sentence ''' * 2_0]
    assert isinstance(src_text[0] , str )
    desired_max_length = 1_0
    ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
    self.assertEqual(ids[-2] , 2 )
    self.assertEqual(ids[-1] , EN_CODE )
    self.assertEqual(len(ids ) , desired_max_length )
def lowercase_ ( self ) -> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def lowercase_ ( self ) -> Dict:
    tmpdirname = tempfile.mkdtemp()
    original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
    self.tokenizer.save_pretrained(tmpdirname )
    new_tok = MBartTokenizer.from_pretrained(tmpdirname )
    self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowercase_ ( self ) -> Union[str, Any]:
    batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
    batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowercase_ ( self ) -> List[Any]:
    batch = self.tokenizer(
        self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
    batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
    self.assertIsInstance(batch , BatchEncoding )
    self.assertEqual((2, 1_4) , batch.input_ids.shape )
    self.assertEqual((2, 1_4) , batch.attention_mask.shape )
    result = batch.input_ids.tolist()[0]
    self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowercase_ ( self ) -> Optional[int]:
    batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
    targets = self.tokenizer(
        text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors='''pt''' )
    labels = targets['''input_ids''']
    batch['''decoder_input_ids'''] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowercase_ ( self ) -> List[str]:
    inputs = self.tokenizer._build_translation_inputs(
        '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
    self.assertEqual(
        nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , ) | 262 | 1 |
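# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original test file): the
# translation-style API exercised by the integration test above. The checkpoint
# and language codes are taken from the test; the rest is standard
# `transformers` tokenizer API.
#
# from transformers import MBartTokenizer
# tokenizer = MBartTokenizer.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tokenizer(
#     ["UN Chief Says There Is No Military Solution in Syria"],
#     text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
#     padding=True,
#     return_tensors="pt",
# )
# # input_ids end with [eos, en_XX]; labels end with [eos, ro_RO], matching the
# # prefix/suffix-token assertions in the tests above.
# ---------------------------------------------------------------------------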
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[str] ={
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """funnel"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
    }
    def __init__( self , vocab_size=3_0_5_2_2 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=7_6_8 , n_head=1_2 , d_head=6_4 , d_inner=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ) -> int:
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers( self , value ) -> None:
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
    @property
    def num_blocks( self ) -> int:
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks( self , value ) -> None:
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' ) | 262 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert ( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None )-> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert) | 262 | 1 |
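# Example usage (illustrative; assumes this script is saved as fp16_convert.py):
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# `fire.Fire(convert)` maps the function's positional and keyword arguments onto
# the command line, so `--map_location cpu` also works; omitting --save_path
# overwrites the input file in place.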
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm_roberta_xl"""] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure) | 262 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 262 | 1 |
def solution ( n = 100 )-> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""") | 262 |
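# Worked check (illustrative addition): for n = 10 the sum 1 + ... + 10 is 55,
# so the square of the sum is 3025, while 1^2 + ... + 10^2 = 385; the difference
# is 3025 - 385 = 2640, which the closed form above reproduces.
if __name__ == "__main__":
    assert solution(10) == 2640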
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : str ={
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """vit_mae"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=1_6 , decoder_hidden_size=5_1_2 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2_0_4_8 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss | 262 | 1 |
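# Illustrative usage (not from the original file): instantiating the config and
# reading the masking hyper-parameter. The assumption here is that this class is
# exposed as `ViTMAEConfig` in `transformers`.
#
# from transformers import ViTMAEConfig
# config = ViTMAEConfig(mask_ratio=0.75)  # 75% of patches are masked in pre-training
# print(config.hidden_size, config.decoder_hidden_size)  # 768 512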
from __future__ import annotations
import math
def ucal ( u: float , p: int )-> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main ( )-> None:
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main() | 262 |
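# Deterministic sketch (illustrative addition, not part of the original file):
# the same Newton forward-difference scheme as `main`, but on given equally
# spaced samples instead of interactive input().
def newton_forward_interpolate(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    # forward-difference table; column 0 holds the samples f(x_i)
    table = [[v] + [0.0] * (n - 1) for v in y0]
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * table[0][i]) / math.factorial(i)
    return summ
# f(x) = x**2 sampled at 0..3 is reproduced exactly:
# newton_forward_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 2.5) == 6.25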
def solution ( max_perimeter = 10**9 )-> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""") | 262 | 1 |
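# Illustrative cross-check (an assumption: the recurrence above enumerates the
# perimeters of "almost equilateral" triangles with sides (a, a, a±1) and
# integral area, as in Project Euler 94). Brute force via Heron's formula:
import math
def _brute_force_perimeters_sum(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            p = 2 * a + b
            if p > max_perimeter:
                continue
            # 16 * area^2 = p * (p - 2a)^2 * (p - 2b); the area is integral
            # exactly when this is a perfect square whose root is divisible by 4.
            t = p * (p - 2 * a) ** 2 * (p - 2 * b)
            if t <= 0:
                continue
            r = math.isqrt(t)
            if r * r == t and r % 4 == 0:
                total += p
    return total
# For small limits both should agree, e.g. 16 + 50 + 196 + 722 = 984:
# solution(1_000) == _brute_force_perimeters_sum(1_000) == 984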
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCAmelCase : int =16
_UpperCAmelCase : Optional[Any] =32
def get_dataloaders ( accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function ( config , args )-> None:
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
def main ( )-> None:
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
    parser.add_argument(
        '''--local_sgd_steps''' , type=int , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main() | 262 |
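# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original example): the script is launched
# through the Accelerate CLI, which handles device placement for each of the
# configurations listed in the header comment, e.g.
#
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8
#   accelerate launch local_sgd.py --mixed_precision fp16
#
# (The file name `local_sgd.py` is an assumption; use whatever this script is
# saved as. `local_sgd.step()` only synchronizes parameters across workers every
# `local_sgd_steps` optimizer steps, which is why it sits inside the LocalSGD
# context rather than being tied to gradient accumulation.)
# ---------------------------------------------------------------------------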
import inspect
import unittest
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> int:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def lowercase_ ( self ) -> List[str]:
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
assert backend in deps, f"""{backend} is not in the deps table!""" | 262 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""
_UpperCAmelCase : Optional[int] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """char_tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('''gpt2''' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> str:
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode ( self , sequences ) -> dict:
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , '''char''' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , '''bpe''' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , '''wp''' )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out['''generated_text'''] = final_strs
        out['''scores'''] = final_scores
        out['''char_preds'''] = char_strs
        out['''bpe_preds'''] = bpe_strs
        out['''wp_preds'''] = wp_strs
        return out
    def _decode_helper ( self , pred_logits , format ) -> tuple:
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '''[s]'''
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '''#'''
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 1_0_2
            eos_str = '''[SEP]'''
        else:
            raise ValueError(f"""Format {format} is not supported.""" )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode ( self , sequences ) -> list:
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode ( self , sequences ) -> list:
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode ( self , sequences ) -> list:
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
return decode_strs | 262 |
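# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). MGP-STR is a scene
# text recognition model whose three heads (char/bpe/wp) are decoded jointly by
# `batch_decode` above; the checkpoint name below is an assumption based on the
# public MGP-STR release.
#
# from PIL import Image
# from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=Image.open("text_crop.png"), return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# print(processor.batch_decode(outputs.logits)["generated_text"])
# ---------------------------------------------------------------------------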
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs ) | 262 | 1 |
def temp_input_value ( min_val = 10 , max_val = 1_000 , option = True )-> int:
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
    return min_val if option else max_val
def get_avg ( number_1 , number_2 )-> int:
    return int((number_1 + number_2) / 2 )
def guess_the_number ( lower , higher , to_guess )-> None:
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''' )
    def answer(number ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('''started...''' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""" )
    print(f"""details : {last_numbers!s}""" )
def main ( )-> None:
    lower = int(input('''Enter lower value : ''' ).strip() )
    higher = int(input('''Enter high value : ''' ).strip() )
    guess = int(input('''Enter value to guess : ''' ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main() | 262 |
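# Deterministic sketch (illustrative addition): the same bisection strategy as
# `guess_the_number`, but with the oracle answered in code instead of via input().
def demo_bisection(lower, higher, secret):
    guesses = []
    while True:
        number = get_avg(lower, higher)
        guesses.append(number)
        if number < secret:
            lower = number
        elif number > secret:
            higher = number
        else:
            return guesses
# demo_bisection(0, 1_000, 355) converges in O(log n) guesses:
# [500, 250, 375, 312, 343, 359, 351, 355]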
def matching_min_vertex_cover ( graph )-> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges ( graph )-> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 262 | 1 |
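# Runnable demo (illustrative addition), using the graph from the comment above.
# The cover is built from a maximal matching, so it touches every edge and is at
# most twice the size of an optimal vertex cover.
if __name__ == "__main__":
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Matching vertex cover:\n{matching_min_vertex_cover(demo_graph)}")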
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''
    def __init__( self , size ) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left( self , idx ) -> int:
        return idx * 2
    def right( self , idx ) -> int:
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx , left_element , right_element , a , b , val ) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx , left_element , right_element , a , b ) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q_1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q_2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q_1 , q_2 )
def __str__( self ) -> str:
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt) | 262 |
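# Brute-force cross-check (illustrative addition): a lazy range assignment
# followed by a range-max query agrees with the same operations on a plain list.
if __name__ == "__main__":
    data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    tree = SegmentTree(len(data))
    tree.build(1, 1, len(data), data)
    tree.update(1, 1, len(data), 3, 7, 42)  # assign 42 to 1-based positions 3..7
    naive = data[:]
    for pos in range(2, 7):  # the same positions, 0-based
        naive[pos] = 42
    assert tree.query(1, 1, len(data), 1, len(data)) == max(naive) == 42
    print("lazy segment tree matches brute force")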
from math import sqrt
def is_prime ( number )-> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def sieve_er ( n )-> list:
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = [] # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def get_prime_numbers ( n )-> list:
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def prime_factorization ( number )-> list:
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = [] # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def greatest_prime_factor ( number )-> int:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def smallest_prime_factor ( number )-> int:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def is_even ( number )-> bool:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare bust been from type bool"
    return number % 2 == 0
def is_odd ( number )-> bool:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare bust been from type bool"
    return number % 2 != 0
def goldbach ( number )-> list:
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = [] # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd ( number_1 , number_2 )-> int:
    assert (
        isinstance(number_1 , int )
        and isinstance(number_2 , int )
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number_1' and 'number_2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1 , int ) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1
def kg_v ( number_1 , number_2 )-> int:
    assert (
        isinstance(number_1 , int )
        and isinstance(number_2 , int )
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number_1' and 'number_2' must been positive integer."
    ans = 1 # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number_1' and 'number_2'
        prime_fac_1 = prime_factorization(number_1 )
        prime_fac_2 = prime_factorization(number_2 )
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1 , number_2 )
    count_1 = 0
    count_2 = 0
    done = [] # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n )
                count_2 = prime_fac_2.count(n )
                for _ in range(max(count_1 , count_2 ) ):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n )
                for _ in range(count_1 ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n )
            for _ in range(count_2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime ( n )-> int:
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2 # this variable holds the answer
    while index < n:
        index += 1
        ans += 1 # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between ( p_number_1 , p_number_2 )-> list:
    assert (
        is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1 # jump to the next number
    ans = [] # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_2:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_1
        and ans[len(ans ) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors ( n )-> list:
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = [] # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number ( number )-> bool:
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def simplify_fraction ( numerator , denominator )-> tuple:
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial ( n )-> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1 # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def fib ( n )-> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1 # this will be return
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
return ans | 262 | 1 |
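# Quick demo (illustrative addition) exercising a few of the helpers above:
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(1)
    assert prime_factorization(84) == [2, 2, 3, 7]
    assert goldbach(28) == [5, 23]  # smallest prime pair is found first
    assert gcd(24, 36) == 12 and kg_v(24, 36) == 72
    assert fib(5) == 8  # with this convention fib(n) is the (n+1)-th Fibonacci number
    print("primelib demo passed")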
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor( LayoutLMvaImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs ) | 262 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
class PerceiverTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self ) -> PerceiverTokenizer:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
    def get_tokenizer( self , **kwargs ) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=2_0 , min_length=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
                toks.append((i, tok) )
            except UnicodeDecodeError:
                pass
        toks = list(filter(lambda t : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def lowercase_ ( self ) -> Union[str, Any]:
        tokenizer = self.perceiver_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text )
        encoded_ids = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''[CLS]Unicode €.[SEP]''' )
        encoded = tokenizer('''e è é ê ë''' )
        encoded_ids = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def lowercase_ ( self ) -> List[str]:
        tokenizer = self.perceiver_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        expected_src_tokens = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.perceiver_tokenizer
lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase_ : int = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ : List[str] = tokenizer(
text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
def lowercase_ ( self ) -> Dict:
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(__lowercase , __lowercase ) | 262 | 1 |
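# The expected ids in the Unicode test above are consistent with a simple byte-level
# scheme: UTF-8 bytes shifted past 6 reserved special-token slots, wrapped in
# [CLS] = 4 and [SEP] = 5. A self-contained sketch inferred from the test fixtures,
# not the actual PerceiverTokenizer source:
def toy_perceiver_encode(text: str) -> list:
    offset = 6  # ids 0-5 are assumed reserved for special tokens
    return [4] + [byte + offset for byte in text.encode("utf-8")] + [5]

assert toy_perceiver_encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]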
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Dict ={"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] =["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] =["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any =[
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] =[
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =[
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 262 |
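# A minimal sketch of the deferred-import pattern used by the module above (not the
# real transformers._LazyModule): attribute lookups on the module resolve lazily
# through PEP 562's module-level __getattr__, so heavy backends load on first use.
import importlib

_demo_import_structure = {"json": ["dumps", "loads"]}  # hypothetical mapping

def __getattr__(name):
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")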
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case__:
'''simple docstring'''
@staticmethod
def lowercase_ ( *__lowercase , **__lowercase ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase_ : str = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def lowercase_ ( self , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : Tuple = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase_ : Dict = len(__lowercase )
self.assertGreater(__lowercase , 0 )
self.assertEqual(
__lowercase , [
{
'''score''': ANY(__lowercase ),
'''label''': ANY(__lowercase ),
'''box''': {'''xmin''': ANY(__lowercase ), '''ymin''': ANY(__lowercase ), '''xmax''': ANY(__lowercase ), '''ymax''': ANY(__lowercase )},
}
for i in range(__lowercase )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowercase_ ( self ) -> List[str]:
pass
@require_torch
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Union[str, Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase_ : Union[str, Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCAmelCase_ : Union[str, Any] = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Any = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCAmelCase_ : Tuple = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowercase_ ( self ) -> List[str]:
pass
@require_torch
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Any = 0.2
lowerCAmelCase_ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Optional[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase_ : Optional[Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , ) | 262 | 1 |
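# Hypothetical stand-alone use of the pipeline exercised by the tests above
# (downloads a checkpoint; the printed scores and boxes are illustrative only):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 4), prediction["box"])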
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str]
SCREAMING_SNAKE_CASE__ : Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ : ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE__ : ClassVar[Any] = None
SCREAMING_SNAKE_CASE__ : str = field(default="""Translation""", init=UpperCAmelCase__, repr=UpperCAmelCase__ )
def __call__( self ) -> Union[str, Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[List] = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ : ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE__ : ClassVar[Any] = None
SCREAMING_SNAKE_CASE__ : str = field(default="""TranslationVariableLanguages""", init=UpperCAmelCase__, repr=UpperCAmelCase__ )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = sorted(set(self.languages ) ) if self.languages else None
lowerCAmelCase_ : int = len(self.languages ) if self.languages else None
def __call__( self ) -> List[str]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def lowercase_ ( self , __lowercase ) -> str:
lowerCAmelCase_ : Union[str, Any] = set(self.languages )
if self.languages and set(__lowercase ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(__lowercase ) - lang_set ) )}) are not in valid set ({", ".join(__lowercase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCAmelCase_ : Union[str, Any] = []
for lang, text in translation_dict.items():
if isinstance(__lowercase , __lowercase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = zip(*sorted(__lowercase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
} | 262 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 262 | 1 |
import math
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase ( lowerCAmelCase_ = 0.1 )-> int:
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Tuple = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCAmelCase_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 262 |
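# The two functions above appear to solve Project Euler 58 (spiral primes); a
# deobfuscated sketch under that assumption, reusing the is_prime name the loop
# above already references:
def spiral_side_length(ratio: float = 0.1) -> int:
    primes = 3  # the 3x3 spiral's diagonals 3, 5, 7, 9 contain three primes
    side = 3    # current side length of the square spiral
    while primes / (2 * side - 1) >= ratio:  # 2 * side - 1 numbers lie on the diagonals
        # three new corners of the next ring: side**2 + k * (side + 1) for k = 1..3;
        # the fourth corner, (side + 2)**2, is an odd square and never prime
        for corner in range(side * side + side + 1, (side + 2) ** 2, side + 1):
            primes += is_prime(corner)
        side += 2
    return side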
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Dict ={
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ernie_m"""
SCREAMING_SNAKE_CASE__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowercase = 2_5_0_0_0_2 , __lowercase = 7_6_8 , __lowercase = 1_2 , __lowercase = 1_2 , __lowercase = 3_0_7_2 , __lowercase = "gelu" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 5_1_4 , __lowercase = 0.02 , __lowercase = 1 , __lowercase = 1e-05 , __lowercase=None , __lowercase=False , __lowercase=0.0 , **__lowercase , ) -> Tuple:
super().__init__(pad_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Tuple = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : List[str] = layer_norm_eps
lowerCAmelCase_ : List[Any] = classifier_dropout
lowerCAmelCase_ : Any = is_decoder
lowerCAmelCase_ : List[Any] = act_dropout | 262 | 1 |
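# A hedged usage sketch of the attribute_map above: legacy attribute names resolve
# to the renamed config fields (dropout -> classifier_dropout, num_classes ->
# num_labels), assuming the class is exposed by transformers as ErnieMConfig:
from transformers import ErnieMConfig

config = ErnieMConfig(classifier_dropout=0.2, num_labels=3)
assert config.dropout == 0.2    # read is routed through attribute_map
assert config.num_classes == 3  # likewise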
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """Salesforce/blip-image-captioning-base"""
SCREAMING_SNAKE_CASE__ : Dict = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = """image_captioner"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoModelForVisionaSeq
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""image"""]
SCREAMING_SNAKE_CASE__ : str = ["""text"""]
def __init__( self , *__lowercase , **__lowercase ) -> Union[str, Any]:
requires_backends(self , ['''vision'''] )
super().__init__(*__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase ) -> Optional[int]:
return self.pre_processor(images=__lowercase , return_tensors='''pt''' )
def lowercase_ ( self , __lowercase ) -> Optional[int]:
return self.model.generate(**__lowercase )
def lowercase_ ( self , __lowercase ) -> str:
return self.pre_processor.batch_decode(__lowercase , skip_special_tokens=__lowercase )[0].strip() | 262 |
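# Hypothetical end-to-end use of the captioning tool above, assuming its public
# name is ImageCaptioningTool; calling a PipelineTool runs encode -> forward ->
# decode against the BLIP checkpoint named in the class attributes:
from PIL import Image

captioner = ImageCaptioningTool()   # assumed public name of the class above
image = Image.open("two_cats.png")  # placeholder local file
print(captioner(image))             # e.g. "two cats sleeping on a couch"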
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_0}
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : Any = batch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : List[str] = min_resolution
lowerCAmelCase_ : Dict = max_resolution
lowerCAmelCase_ : Tuple = do_resize
lowerCAmelCase_ : Optional[Any] = size
lowerCAmelCase_ : Union[str, Any] = do_center_crop
lowerCAmelCase_ : Optional[Any] = crop_size
def lowercase_ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessingTester(self )
@property
def lowercase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowercase , '''size''' ) )
self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowercase , '''crop_size''' ) )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 262 | 1 |
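# A minimal sketch of the resize -> center-crop contract these tests assert
# (shortest edge scaled to 20, then an 18x18 center crop); a plain-PIL
# approximation, not the actual MobileNetV1 image processor:
from PIL import Image

demo = Image.new("RGB", (40, 30))
scale = 20 / min(demo.size)  # shortest_edge -> 20
demo = demo.resize((round(demo.width * scale), round(demo.height * scale)))
left, top = (demo.width - 18) // 2, (demo.height - 18) // 2
demo = demo.crop((left, top, left + 18, top + 18))
assert demo.size == (18, 18)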
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = tempfile.mkdtemp()
lowerCAmelCase_ : Tuple = 8
# DPR tok
lowerCAmelCase_ : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__lowercase , exist_ok=__lowercase )
lowerCAmelCase_ : Any = os.path.join(__lowercase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase_ : Any = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
lowerCAmelCase_ : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase_ : Optional[int] = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__lowercase , exist_ok=__lowercase )
lowerCAmelCase_ : int = os.path.join(__lowercase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase_ : Union[str, Any] = os.path.join(__lowercase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def lowercase_ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowercase_ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowercase_ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
lowerCAmelCase_ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCAmelCase_ : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__lowercase )
rag_tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = RagTokenizer.from_pretrained(__lowercase , config=__lowercase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __lowercase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __lowercase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : List[Any] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
lowerCAmelCase_ : Optional[Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCAmelCase_ : Optional[Any] = tokenizer(__lowercase )
self.assertIsNotNone(__lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[str] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
lowerCAmelCase_ : Dict = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCAmelCase_ : Union[str, Any] = tokenizer(__lowercase )
self.assertIsNotNone(__lowercase ) | 262 |
from __future__ import annotations
import math
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase ) -> None:
lowerCAmelCase_ : str = size
# approximate the overall size of segment tree with given value
lowerCAmelCase_ : Dict = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCAmelCase_ : Dict = [0 for i in range(0 , 4 * size )]
lowerCAmelCase_ : Optional[int] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowercase_ ( self , __lowercase ) -> int:
return idx * 2
def lowercase_ ( self , __lowercase ) -> int:
return idx * 2 + 1
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> None:
if left_element == right_element:
lowerCAmelCase_ : Tuple = a[left_element - 1]
else:
lowerCAmelCase_ : int = (left_element + right_element) // 2
self.build(self.left(__lowercase ) , __lowercase , __lowercase , __lowercase )
self.build(self.right(__lowercase ) , mid + 1 , __lowercase , __lowercase )
lowerCAmelCase_ : Any = max(
self.segment_tree[self.left(__lowercase )] , self.segment_tree[self.right(__lowercase )] )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> bool:
if self.flag[idx] is True:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Union[str, Any] = False
if left_element != right_element:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Any = self.lazy[idx]
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : Optional[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCAmelCase_ : Dict = val
if left_element != right_element:
lowerCAmelCase_ : Union[str, Any] = val
lowerCAmelCase_ : List[Any] = val
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : List[str] = True
return True
lowerCAmelCase_ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(__lowercase ) , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.update(self.right(__lowercase ) , mid + 1 , __lowercase , __lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(__lowercase )] , self.segment_tree[self.right(__lowercase )] )
return True
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int | float:
if self.flag[idx] is True:
lowerCAmelCase_ : Union[str, Any] = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = False
if left_element != right_element:
lowerCAmelCase_ : List[Any] = self.lazy[idx]
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Optional[int] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCAmelCase_ : List[Any] = (left_element + right_element) // 2
lowerCAmelCase_ : Tuple = self.query(self.left(__lowercase ) , __lowercase , __lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = self.query(self.right(__lowercase ) , mid + 1 , __lowercase , __lowercase , __lowercase )
return max(__lowercase , __lowercase )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , __lowercase , __lowercase ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_UpperCAmelCase : str =[1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_UpperCAmelCase : List[str] =15
_UpperCAmelCase : Any =SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt) | 262 | 1 |
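# A small brute-force cross-check of the lazy segment tree above (a hypothetical
# addition, assuming the class is exported as SegmentTree as the __main__ block
# does): every range-max query must agree with max() over a plain list after the
# same range assignment.
checks = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lazy_tree = SegmentTree(len(checks))
lazy_tree.build(1, 1, len(checks), checks)
assert lazy_tree.query(1, 1, len(checks), 4, 6) == max(checks[3:6])
checks[0:3] = [111] * 3  # mirrors update(1, 1, size, 1, 3, 111)
lazy_tree.update(1, 1, len(checks), 1, 3, 111)
assert lazy_tree.query(1, 1, len(checks), 1, 15) == max(checks)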
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
lowerCAmelCase_ : List[Any] = TapasConfig.from_json_file(lowerCAmelCase_ )
# set absolute/relative position embeddings parameter
lowerCAmelCase_ : Tuple = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowerCAmelCase_ : Union[str, Any] = TapasForQuestionAnswering(config=lowerCAmelCase_ )
elif task == "WTQ":
# run_task_main.py hparams
lowerCAmelCase_ : int = 4
lowerCAmelCase_ : Tuple = True
# hparam_utils.py hparams
lowerCAmelCase_ : Dict = 0.664694
lowerCAmelCase_ : Any = 0.207951
lowerCAmelCase_ : List[str] = 0.121194
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Tuple = 0.0352513
lowerCAmelCase_ : Dict = TapasForQuestionAnswering(config=lowerCAmelCase_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowerCAmelCase_ : Union[str, Any] = 4
lowerCAmelCase_ : List[str] = False
# hparam_utils.py hparams
lowerCAmelCase_ : Optional[Any] = 36.4519
lowerCAmelCase_ : List[Any] = 0.903421
lowerCAmelCase_ : Dict = 222.088
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : str = True
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Optional[Any] = 0.763141
lowerCAmelCase_ : Union[str, Any] = TapasForQuestionAnswering(config=lowerCAmelCase_ )
elif task == "TABFACT":
lowerCAmelCase_ : Optional[Any] = TapasForSequenceClassification(config=lowerCAmelCase_ )
elif task == "MLM":
lowerCAmelCase_ : List[Any] = TapasForMaskedLM(config=lowerCAmelCase_ )
elif task == "INTERMEDIATE_PRETRAINING":
lowerCAmelCase_ : Dict = TapasModel(config=lowerCAmelCase_ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCAmelCase_ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
lowerCAmelCase_ : Optional[int] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(lowerCAmelCase_ )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_UpperCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase : Tuple =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
) | 262 |
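# Hypothetical direct invocation mirroring the CLI above, with placeholder paths
# (requires the TensorFlow checkpoint and config on disk):
convert_tf_checkpoint_to_pytorch(
    "WTQ",                           # task
    True,                            # reset_position_index_per_cell
    "tapas_wtq/model.ckpt",          # tf_checkpoint_path (placeholder)
    "tapas_wtq/tapas_config.json",   # tapas_config_file (placeholder)
    "tapas_wtq_pytorch/",            # pytorch_dump_path (placeholder)
)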
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_UpperCAmelCase : Optional[int] ="""src/transformers"""
_UpperCAmelCase : str ="""docs/source/en"""
_UpperCAmelCase : Optional[int] ="""."""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase_ : int = f.readlines()
# Find the start prompt.
lowerCAmelCase_ : List[Any] = 0
while not lines[start_index].startswith(lowerCAmelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase_ : List[str] = start_index
while not lines[end_index].startswith(lowerCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_UpperCAmelCase : Optional[Any] ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_UpperCAmelCase : Optional[int] =re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_UpperCAmelCase : Dict =re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_UpperCAmelCase : Optional[Any] =re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[int] =direct_transformers_import(TRANSFORMERS_PATH)
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]:
lowerCAmelCase_ : str = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase_ )
return [m.group(0 ) for m in matches]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
lowerCAmelCase_ : Tuple = 2 if text == '''✅''' or text == '''❌''' else len(lowerCAmelCase_ )
lowerCAmelCase_ : int = (width - text_length) // 2
lowerCAmelCase_ : Union[str, Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCAmelCase ( )-> str:
lowerCAmelCase_ : Any = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCAmelCase_ : Dict = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowerCAmelCase_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowerCAmelCase_ : Tuple = collections.defaultdict(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = collections.defaultdict(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = collections.defaultdict(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = collections.defaultdict(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = collections.defaultdict(lowerCAmelCase_ )
    # Let's look through all transformers objects (once).
for attr_name in dir(lowerCAmelCase_ ):
lowerCAmelCase_ : Optional[int] = None
if attr_name.endswith('''Tokenizer''' ):
lowerCAmelCase_ : Union[str, Any] = slow_tokenizers
lowerCAmelCase_ : List[str] = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
lowerCAmelCase_ : int = fast_tokenizers
lowerCAmelCase_ : Union[str, Any] = attr_name[:-13]
elif _re_tf_models.match(lowerCAmelCase_ ) is not None:
lowerCAmelCase_ : Tuple = tf_models
lowerCAmelCase_ : str = _re_tf_models.match(lowerCAmelCase_ ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase_ ) is not None:
lowerCAmelCase_ : Tuple = flax_models
lowerCAmelCase_ : Union[str, Any] = _re_flax_models.match(lowerCAmelCase_ ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase_ ) is not None:
lowerCAmelCase_ : Any = pt_models
lowerCAmelCase_ : List[Any] = _re_pt_models.match(lowerCAmelCase_ ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase_ ) > 0:
if attr_name in model_name_to_prefix.values():
lowerCAmelCase_ : Union[str, Any] = True
break
# Try again after removing the last word in the name
lowerCAmelCase_ : Any = ''''''.join(camel_case_split(lowerCAmelCase_ )[:-1] )
# Let's build that table!
lowerCAmelCase_ : int = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowerCAmelCase_ : Tuple = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowerCAmelCase_ : Union[str, Any] = [len(lowerCAmelCase_ ) + 2 for c in columns]
lowerCAmelCase_ : Optional[Any] = max([len(lowerCAmelCase_ ) for name in model_names] ) + 2
# Build the table per se
lowerCAmelCase_ : Dict = '''|''' + '''|'''.join([_center_text(lowerCAmelCase_ , lowerCAmelCase_ ) for c, w in zip(lowerCAmelCase_ , lowerCAmelCase_ )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
lowerCAmelCase_ : List[str] = {True: '''✅''', False: '''❌'''}
for name in model_names:
lowerCAmelCase_ : List[Any] = model_name_to_prefix[name]
lowerCAmelCase_ : Union[str, Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCAmelCase_ , lowerCAmelCase_ ) for l, w in zip(lowerCAmelCase_ , lowerCAmelCase_ )] ) + "|\n"
return table
def lowerCAmelCase ( lowerCAmelCase_=False )-> Tuple:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = _find_text_in_file(
filename=os.path.join(lowerCAmelCase_ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
lowerCAmelCase_ : Tuple = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCAmelCase_ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_UpperCAmelCase : Tuple =parser.parse_args()
check_model_table(args.fix_and_overwrite) | 262 | 1 |
import os
def lowerCAmelCase ( lowerCAmelCase_ = "input.txt" )-> int:
with open(os.path.join(os.path.dirname(lowerCAmelCase_ ) , lowerCAmelCase_ ) ) as input_file:
lowerCAmelCase_ : Union[str, Any] = [
[int(lowerCAmelCase_ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowerCAmelCase_ : List[str] = len(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = len(matrix[0] )
lowerCAmelCase_ : int = [[-1 for _ in range(lowerCAmelCase_ )] for _ in range(lowerCAmelCase_ )]
for i in range(lowerCAmelCase_ ):
lowerCAmelCase_ : int = matrix[i][0]
for j in range(1 , lowerCAmelCase_ ):
for i in range(lowerCAmelCase_ ):
lowerCAmelCase_ : Optional[int] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase_ ):
lowerCAmelCase_ : Union[str, Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCAmelCase_ : List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""") | 262 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCAmelCase ( )-> int:
lowerCAmelCase_ : int = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
lowerCAmelCase_ : Dict = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase_ )
DownloadCommand.register_subcommand(lowerCAmelCase_ )
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
RunCommand.register_subcommand(lowerCAmelCase_ )
ServeCommand.register_subcommand(lowerCAmelCase_ )
UserCommands.register_subcommand(lowerCAmelCase_ )
AddNewModelCommand.register_subcommand(lowerCAmelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ )
LfsCommands.register_subcommand(lowerCAmelCase_ )
PTtoTFCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
lowerCAmelCase_ : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
lowerCAmelCase_ : List[Any] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main() | 262 | 1 |