code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import fire
from utils import calculate_rouge, save_json
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : int = [x.strip() for x in open(lowerCamelCase_ ).readlines()]
lowercase__ : List[Any] = [x.strip() for x in open(lowerCamelCase_ ).readlines()][: len(lowerCamelCase_ )]
lowercase__ : Dict = calculate_rouge(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
if save_path is not None:
save_json(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 496 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bza
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lza.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
import bza
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowerCamelCase_ , 'wb' ) as f:
_lowercase : List[str] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
_lowercase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 89 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=30 , SCREAMING_SNAKE_CASE__ : int=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]=0.9 , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Optional[int]=[0.5, 0.5, 0.5] , ) -> Tuple:
A : List[Any] =size if size is not None else {'shortest_edge': 30}
A : Any =crop_size if crop_size is not None else {'height': 30, 'width': 30}
A : str =parent
A : Union[str, Any] =batch_size
A : int =num_channels
A : Optional[Any] =min_resolution
A : str =max_resolution
A : Any =do_resize_and_center_crop
A : List[str] =size
A : Optional[Any] =crop_pct
A : Any =crop_size
A : Any =do_normalize
A : Dict =image_mean
A : Tuple =image_std
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ):
'''simple docstring'''
lowercase : int = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
A : List[Any] =PoolFormerImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
A : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'crop_pct' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
A : Any =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : Union[str, Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
A : int =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
A : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Dict =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
A : Dict =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[Any]:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : List[str] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
A : Optional[Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 305 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __magic_name__ ( _a ):
lowercase : Tuple ="""realm"""
def __init__( self : int , UpperCamelCase__ : List[str]=3_05_22 , UpperCamelCase__ : Optional[int]=7_68 , UpperCamelCase__ : int=1_28 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Tuple=8 , UpperCamelCase__ : Dict=30_72 , UpperCamelCase__ : Any="gelu_new" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : str=5_12 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=1e-1_2 , UpperCamelCase__ : List[str]=2_56 , UpperCamelCase__ : Optional[int]=10 , UpperCamelCase__ : Tuple=1e-3 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : List[str]=3_20 , UpperCamelCase__ : str=13_35_37_18 , UpperCamelCase__ : int=50_00 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : Optional[int]=2 , **UpperCamelCase__ : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
# Common config
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = retriever_proj_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = num_candidates
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
# Reader config
UpperCAmelCase = span_hidden_size
UpperCAmelCase = max_span_width
UpperCAmelCase = reader_layer_norm_eps
UpperCAmelCase = reader_beam_size
UpperCAmelCase = reader_seq_len
# Retrieval config
UpperCAmelCase = num_block_records
UpperCAmelCase = searcher_beam_size
| 323 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
    """Builds tiny XLM configs/inputs and shape-checks every XLM head model.

    All ``create_and_check_*`` methods consume the 9-tuple produced by
    :meth:`prepare_config_and_inputs`, in the same order.

    NOTE(review): the generated version of this class declared every method
    with the same name and every parameter as ``lowerCamelCase`` (a duplicate
    argument SyntaxError).  Names below are reconstructed from the attribute
    assignments / keyword usages, which preserve the original order.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        # `parent` is the unittest.TestCase that owns the assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels,
        input_mask) with random tiny tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # initialized so the tuple below is always well-defined
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Build a tiny XLMConfig from the tester hyperparameters."""
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, seq) -> (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin machinery: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


# Backward/forward-compatible alias: the test suite below instantiates `XLMModelTester(self)`.
XLMModelTester = _lowerCamelCase
@require_torch
class _lowerCamelCase( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """unittest suite wiring the XLM model tester into the common test mixins.

    NOTE(review): the generated version listed the same base class three times
    (a ``TypeError: duplicate base class``) and named every test method
    ``UpperCamelCase`` (never discovered by unittest); names are restored from
    the standard transformers test-suite layout.
    """

    # Class attributes read by the mixins above.
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Return True for pipeline test combinations known to fail."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Augment the common inputs with dummy QA labels when requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Check per-step attention shapes produced during generation."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Check per-step hidden-state shapes produced during generation."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained xlm-mlm-en-2048 checkpoint."""

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        # Greedy decoding loops on "the president"; see the TODO below.
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89 | 0 |
import torch
from diffusers import StableDiffusionPipeline

# Minimal DreamBooth inference script: load a fine-tuned Stable Diffusion
# checkpoint and render one image for the trained "sks" subject.
model_id = "path-to-your-trained-model"
# fp16 weights on GPU; assumes a CUDA device is available.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 62 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# Module-level logger; `main()` below reads this exact name.
logger = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): the generated version named every field ``lowercase_`` (each
    declaration clobbering the previous one) and used the undefined ``_a`` as a
    default; field names are restored from how `main()` reads them.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset name, or local train+validation files, must be given.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


# Name expected by `main()` below.
DataTrainingArguments = _lowerCamelCase
@dataclass
class _lowerCamelCase:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    NOTE(review): field names restored from how `main()` reads them
    (`model_args.model_name_or_path`, `model_args.cache_dir`, ...).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


# Name expected by `main()` below.
ModelArguments = _lowerCamelCase
def main():
    """Fine-tune/evaluate a BART+TAPEX sequence classifier on TabFact-style table entailment."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Rows are '#'-separated; the first row holds the column names."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        statements = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, statements, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for `xla_spawn` (TPUs); `index` is the process index and is unused."""
    main()


if __name__ == "__main__":
    main()
| 89 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A_ ( ConfigTester ):
    """ConfigTester specialization checking MobileNetV2-specific config attributes.

    The method name matches the hook `ConfigTester.run_common_tests` dispatches to,
    so these extra hasattr checks actually run.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class A_ :
    """Builds MobileNetV2 configs/inputs and runs shape checks for the
    backbone, image-classification and semantic-segmentation heads.

    NOTE(review): a renaming pass collapsed every parameter of `__init__`
    (and of the check methods) to the single name `SCREAMING_SNAKE_CASE__`
    -- duplicate parameter names are a SyntaxError -- and the assignment
    targets to `__lowerCamelCase`, while the bodies still reference the
    originally intended names (`parent`, `batch_size`, `config`, ...).
    The code is kept byte-for-byte; only documentation was added.
    """

    def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str=1_3 ,SCREAMING_SNAKE_CASE__ : Dict=3 ,SCREAMING_SNAKE_CASE__ : Tuple=3_2 ,SCREAMING_SNAKE_CASE__ : Dict=0.25 ,SCREAMING_SNAKE_CASE__ : Optional[int]=8 ,SCREAMING_SNAKE_CASE__ : int=8 ,SCREAMING_SNAKE_CASE__ : Dict=6 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]="relu6" ,SCREAMING_SNAKE_CASE__ : Dict=1_2_8_0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 ,SCREAMING_SNAKE_CASE__ : List[str]=0.02 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Tuple=1_0 ,SCREAMING_SNAKE_CASE__ : Tuple=None ,):
        """Store all tester hyper-parameters on the instance."""
        __lowerCamelCase : Union[str, Any] = parent
        __lowerCamelCase : List[str] = batch_size
        __lowerCamelCase : str = num_channels
        __lowerCamelCase : List[str] = image_size
        __lowerCamelCase : Optional[int] = depth_multiplier
        __lowerCamelCase : str = depth_divisible_by
        __lowerCamelCase : List[Any] = min_depth
        __lowerCamelCase : str = expand_ratio
        __lowerCamelCase : Any = tf_padding
        __lowerCamelCase : int = output_stride
        __lowerCamelCase : Optional[Any] = first_layer_is_expansion
        __lowerCamelCase : str = finegrained_output
        __lowerCamelCase : List[str] = hidden_act
        # Effective hidden size shrinks with the depth multiplier unless
        # finegrained output is requested.
        __lowerCamelCase : List[Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        __lowerCamelCase : Tuple = classifier_dropout_prob
        __lowerCamelCase : Union[str, Any] = use_labels
        __lowerCamelCase : int = is_training
        __lowerCamelCase : List[Any] = num_labels
        __lowerCamelCase : List[Any] = initializer_range
        __lowerCamelCase : Any = scope

    def lowerCAmelCase ( self : Optional[int]):
        """Create random pixel values (and labels when `use_labels`) plus a config."""
        __lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __lowerCamelCase : str = None
        __lowerCamelCase : Tuple = None
        if self.use_labels:
            __lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.num_labels)
            __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels)
        __lowerCamelCase : Dict = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def lowerCAmelCase ( self : Optional[Any]):
        """Build a MobileNetV2 config from the stored hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,depth_divisible_by=self.depth_divisible_by ,min_depth=self.min_depth ,expand_ratio=self.expand_ratio ,output_stride=self.output_stride ,first_layer_is_expansion=self.first_layer_is_expansion ,finegrained_output=self.finegrained_output ,hidden_act=self.hidden_act ,tf_padding=self.tf_padding ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)

    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        """Shape-check the bare backbone: last hidden state and pooled output."""
        __lowerCamelCase : int = MobileNetVaModel(config=SCREAMING_SNAKE_CASE__)
        model.to(SCREAMING_SNAKE_CASE__)
        model.eval()
        __lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE__)
        # Spatial dims are reduced by the configured output stride.
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
        self.parent.assertEqual(
            result.pooler_output.shape ,(self.batch_size, self.last_hidden_size) ,)

    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
        """Shape-check the image-classification head."""
        __lowerCamelCase : Dict = self.num_labels
        __lowerCamelCase : Dict = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__)
        model.to(SCREAMING_SNAKE_CASE__)
        model.eval()
        __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))

    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str]):
        """Shape-check the semantic-segmentation head, with and without labels."""
        __lowerCamelCase : str = self.num_labels
        __lowerCamelCase : List[str] = MobileNetVaForSemanticSegmentation(SCREAMING_SNAKE_CASE__)
        model.to(SCREAMING_SNAKE_CASE__)
        model.eval()
        __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(
            result.logits.shape ,(
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
        __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(
            result.logits.shape ,(
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)

    def lowerCAmelCase ( self : Union[str, Any]):
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        __lowerCamelCase : Tuple = self.prepare_config_and_inputs()
        __lowerCamelCase : Dict = config_and_inputs
        __lowerCamelCase : str = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
    """Common + pipeline test-mixin checks for the MobileNetV2 models.

    NOTE(review): several method bodies reference `SCREAMING_SNAKE_CASE__`
    although the method takes no such parameter (a renaming pass collapsed
    the original local names), and the nested `check_hidden_states_output`
    declares duplicate parameter names (a SyntaxError).  The code is kept
    byte-for-byte; only documentation was added.
    """

    # Model classes exercised by the common tests (torch only).
    _UpperCAmelCase : Union[str, Any] = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Task -> model mapping consumed by the pipeline test mixin.
    _UpperCAmelCase : Optional[Any] = (
        {
            """feature-extraction""": MobileNetVaModel,
            """image-classification""": MobileNetVaForImageClassification,
            """image-segmentation""": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags disabled for this architecture in the common suite.
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : List[Any] = False
    _UpperCAmelCase : Dict = False
    _UpperCAmelCase : List[str] = False

    def lowerCAmelCase ( self : Union[str, Any]):
        """Instantiate the model tester and the config tester."""
        __lowerCamelCase : int = MobileNetVaModelTester(self)
        __lowerCamelCase : Tuple = MobileNetVaConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def lowerCAmelCase ( self : Any):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def lowerCAmelCase ( self : int):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def lowerCAmelCase ( self : List[str]):
        pass

    def lowerCAmelCase ( self : int):
        """Check the forward signature's first argument is `pixel_values`."""
        __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : Dict = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase : List[str] = [*signature.parameters.keys()]
            __lowerCamelCase : List[str] = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Tuple):
        """Shape-check the bare backbone."""
        __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any):
        """Verify the 16 hidden states, via the flag and via the config."""
        def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
            __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__)
            model.to(SCREAMING_SNAKE_CASE__)
            model.eval()
            with torch.no_grad():
                __lowerCamelCase : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
            __lowerCamelCase : str = outputs.hidden_states
            # MobileNetV2 emits 16 hidden-state tensors.
            __lowerCamelCase : Union[str, Any] = 1_6
            self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : str = True
            check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase : int = True
            check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Optional[Any]):
        """Shape-check the image-classification head."""
        __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Optional[Any]):
        """Shape-check the semantic-segmentation head."""
        __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__)

    @slow
    def lowerCAmelCase ( self : int):
        """Smoke-test loading the first published checkpoint."""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : List[str] = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
    """Load the COCO cats fixture image used by the integration tests."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Slow integration tests against pretrained MobileNetV2 checkpoints.

    NOTE(review): the bodies reference `SCREAMING_SNAKE_CASE__` although
    these methods take no such parameter (a renaming pass collapsed the
    original locals / `torch_device`).  Code unchanged; comments only.
    """

    @cached_property
    def lowerCAmelCase ( self : Union[str, Any]):
        """Image processor for the classification checkpoint (None without vision)."""
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def lowerCAmelCase ( self : Any):
        """Verify classification logits on the COCO cats fixture image."""
        __lowerCamelCase : str = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = self.default_image_processor
        __lowerCamelCase : Tuple = prepare_img()
        __lowerCamelCase : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)

        # forward pass
        with torch.no_grad():
            __lowerCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE__)

        # verify the logits
        __lowerCamelCase : Union[str, Any] = torch.Size((1, 1_0_0_1))
        self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = torch.tensor([0.2445, -1.1993, 0.1905]).to(SCREAMING_SNAKE_CASE__)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))

    @slow
    def lowerCAmelCase ( self : List[str]):
        """Verify DeepLabV3+MobileNetV2 segmentation logits on the fixture image."""
        __lowerCamelCase : Optional[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        __lowerCamelCase : Union[str, Any] = model.to(SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : int = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')

        __lowerCamelCase : Tuple = prepare_img()
        __lowerCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)

        # forward pass
        with torch.no_grad():
            __lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = outputs.logits

        # verify the logits
        __lowerCamelCase : Any = torch.Size((1, 2_1, 6_5, 6_5))
        self.assertEqual(logits.shape ,SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : Optional[int] = torch.tensor(
            [
                [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] ,device=SCREAMING_SNAKE_CASE__ ,)

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 652 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    """Compute the Liouville lambda of a positive integer.

    Returns -1 when the number of prime factors (counted with multiplicity)
    is odd, and 1 when it is even.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is smaller than 1.
    """
    # Fix: validate against `int` and interpolate the actual argument (the
    # previous code called isinstance with the value as the type, referenced
    # an undefined name `number`, and raised the value instead of the message).
    if not isinstance(lowerCamelCase_ , int ):
        msg = f"Input value of [number={lowerCamelCase_}] must be an integer"
        raise TypeError(msg )
    if lowerCamelCase_ < 1:
        raise ValueError('Input must be a positive integer' )
    # The parity of the prime-factor multiset decides the sign.
    return -1 if len(prime_factors(lowerCamelCase_ ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 0 |
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the list is empty.
    """
    # Fix: the body must use the declared parameter (the previous code
    # referenced the undefined names `nums` and `lowerCamelCase_`); the
    # annotation `Dict` was also wrong for a list of numbers.
    if not _lowerCAmelCase:
        raise ValueError("List is empty" )
    return sum(_lowerCAmelCase ) / len(_lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 454 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
    """Dense row-major matrix supporting +, unary -, -, * (scalar and matrix),
    transpose, and the Sherman-Morrison rank-one inverse update.

    NOTE(review): a renaming pass collapsed every method's parameters to the
    single name `lowerCamelCase` (duplicate parameter names are a
    SyntaxError) and all assignment targets to `_lowercase`, while the
    bodies still reference the originally intended names (`row`, `column`,
    `value`, `another`, ...).  The code is kept byte-for-byte; only
    documentation was added.
    """

    def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        _lowercase , _lowercase : str = row, column
        _lowercase : Any = [[default_value for c in range(lowerCamelCase)] for r in range(lowerCamelCase)]

    def __str__( self) -> str:
        """Render the matrix with right-aligned, equal-width cells."""
        _lowercase : Tuple = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''

        # Make string identifier
        _lowercase : str = 0
        for row_vector in self.array:
            for obj in row_vector:
                _lowercase : Optional[int] = max(lowerCamelCase, len(str(lowerCamelCase)))
        _lowercase : List[str] = F'''%{max_element_length}s'''

        # Make string and return
        def single_line(lowerCamelCase) -> str:
            # Format one row vector with the shared width specifier.
            nonlocal string_format_identifier
            _lowercase : Union[str, Any] = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(lowerCamelCase) for row_vector in self.array)
        return s

    def __repr__( self) -> str:
        """Reuse the human-readable rendering for debugging."""
        return str(self)

    def UpperCamelCase ( self, lowerCamelCase) -> bool:
        """Return True when `loc` is a valid (row, column) index pair."""
        if not (isinstance(lowerCamelCase, (list, tuple)) and len(lowerCamelCase) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self, lowerCamelCase) -> Any:
        """Read the element at `loc` == (row, column)."""
        assert self.validate_indicies(lowerCamelCase)
        return self.array[loc[0]][loc[1]]

    def __setitem__( self, lowerCamelCase, lowerCamelCase) -> None:
        """Write `value` at `loc` == (row, column)."""
        assert self.validate_indicies(lowerCamelCase)
        _lowercase : Optional[Any] = value

    def __add__( self, lowerCamelCase) -> Matrix:
        """Element-wise sum; shapes must match."""
        assert isinstance(lowerCamelCase, lowerCamelCase)
        assert self.row == another.row and self.column == another.column

        # Add
        _lowercase : Any = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                _lowercase : int = self[r, c] + another[r, c]
        return result

    def __neg__( self) -> Matrix:
        """Element-wise negation."""
        _lowercase : List[Any] = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                _lowercase : List[str] = -self[r, c]
        return result

    def __sub__( self, lowerCamelCase) -> Matrix:
        """Element-wise difference, defined via addition of the negation."""
        return self + (-another)

    def __mul__( self, lowerCamelCase) -> Matrix:
        """Scalar multiplication for numeric operands, O(n^3) matrix
        multiplication for matrices; TypeError otherwise."""
        if isinstance(lowerCamelCase, (int, float)): # Scalar multiplication
            _lowercase : Dict = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    _lowercase : Union[str, Any] = self[r, c] * another
            return result
        elif isinstance(lowerCamelCase, lowerCamelCase): # Matrix multiplication
            assert self.column == another.row
            _lowercase : str = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            _lowercase : Tuple = F'''Unsupported type given for another ({type(lowerCamelCase)})'''
            raise TypeError(lowerCamelCase)

    def UpperCamelCase ( self) -> Matrix:
        """Return a new matrix that is the transpose of this one."""
        _lowercase : List[Any] = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                _lowercase : Union[str, Any] = self[r, c]
        return result

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
        """Sherman-Morrison update: treating `self` as A^-1, return
        (A + u v^T)^-1, or None when 1 + v^T A^-1 u == 0 (not invertible)."""
        # u and v must be column vectors matching this (square) matrix.
        assert isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector

        # Calculate
        _lowercase : Dict = v.transpose()
        _lowercase : Any = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a 3x3 identity."""
        # Fix: both demo functions previously shared one name, and the bodies
        # referenced undefined names (`ainv`, `u`, `v`, `Matrix`, `testa`);
        # the matrix class in this module is `_lowerCamelCase`.
        # a^(-1): start from the identity matrix.
        ainv = _lowerCamelCase(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v: column vectors for the rank-one update.
        u = _lowerCamelCase(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = _lowerCamelCase(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''' )
        print(F'''v is {v}''' )
        print(F'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
'''simple docstring'''
import numpy as np
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Optional[int] ):
"""simple docstring"""
return np.where(vector > 0 ,lowerCamelCase_ ,(alpha * (np.exp(lowerCamelCase_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    """Format a number of seconds as ``h:mm:ss``, or ``mm:ss`` under an hour.

    The input is truncated to an integer first.  (Fix: the return annotation
    previously said `int` although a string is returned.)
    """
    total_seconds = int(lowerCamelCase_ )
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return F'''{hours}:{minutes:02d}:{seconds:02d}'''
    return F'''{minutes:02d}:{seconds:02d}'''
def UpperCamelCase_( value , total , prefix , label , width=300 ) -> str:
    """Return the HTML for a progress bar: `prefix` above the bar, `label`
    beside it, `value`/`total` driving the `<progress>` element.

    Fix: the five parameters were previously all declared with the same
    name (a SyntaxError) while the body referenced `value`, `total`,
    `prefix`, `label` and `width`; the return annotation also wrongly said
    `Dict` for a string result.
    """
    # docstyle-ignore
    return F'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Any = F'''{elt:.6f}''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase:
    """Jupyter/Colab HTML progress bar rendered via `IPython.display`.

    NOTE(review): a renaming pass collapsed the `__init__`/`update`
    parameters to the single name `lowerCamelCase` (duplicate parameter
    names are a SyntaxError) and all assignment targets to `_lowercase`,
    while the bodies still reference the intended names (`total`,
    `prefix`, `value`, ...).  Code unchanged; comments only.
    """

    # Number of initial update calls that always refresh the display.
    lowercase_ : str = 5
    # Minimum number of seconds between two displayed refreshes.
    lowercase_ : str = 0.2

    def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
        """Store bar configuration; nothing is rendered until `update`."""
        _lowercase : Optional[int] = total
        _lowercase : Optional[int] = '' if prefix is None else prefix
        _lowercase : Tuple = leave
        _lowercase : str = parent
        _lowercase : str = width
        _lowercase : List[Any] = None
        _lowercase : List[str] = None
        _lowercase : Tuple = None

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
        """Advance the bar to `value`, throttling re-renders so the notebook
        front-end is not flooded; `force_update` bypasses the throttle."""
        _lowercase : Any = value
        if comment is not None:
            _lowercase : Union[str, Any] = comment
        if self.last_value is None:
            # First call: record start time/value and render immediately.
            _lowercase : Dict = time.time()
            _lowercase : Tuple = value
            _lowercase : str = None
            _lowercase : Optional[int] = self.warmup
            _lowercase : Optional[Any] = 1
            self.update_bar(lowerCamelCase)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            _lowercase : List[str] = time.time()
            _lowercase : Tuple = current_time - self.start_time
            # We could have value = self.start_value if the update is called twixe with the same start value.
            if value > self.start_value:
                _lowercase : Dict = self.elapsed_time / (value - self.start_value)
            else:
                _lowercase : int = None
            if value >= self.total:
                _lowercase : Dict = self.total
                _lowercase : List[str] = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                _lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
            self.update_bar(lowerCamelCase)
            _lowercase : int = value
            _lowercase : Tuple = current_time
            if self.average_time_per_item is None:
                _lowercase : str = 1
            else:
                # Schedule the next refresh roughly `update_every` seconds away.
                _lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
        """Compose the textual label (count, timings, throughput, comment)
        and re-render."""
        _lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
        if self.elapsed_time is None:
            _lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            _lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            _lowercase : Union[str, Any] = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                F''' {format_time(self.predicted_remaining)}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
        self.display()

    def UpperCamelCase ( self) -> List[Any]:
        """Render (or re-render) this bar's HTML in the notebook output."""
        _lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            _lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
        else:
            self.output.update(disp.HTML(self.html_code))

    def UpperCamelCase ( self) -> Optional[Any]:
        """Blank out the bar (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
    """Progress bar plus an HTML metrics table; can host one child bar.

    NOTE(review): parameter names were collapsed to `lowerCamelCase` by a
    renaming pass (duplicate parameter names are a SyntaxError) and the
    bodies reference the intended names; `NotebookProgressBar` below is
    also not defined under that name in this file.  Comments only.
    """

    def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
        """Takes the total step count and an optional initial column-name row."""
        super().__init__(lowerCamelCase)
        _lowercase : Optional[Any] = None if column_names is None else [column_names]
        _lowercase : Any = None

    def UpperCamelCase ( self) -> List[Any]:
        """Render the bar, the metrics table and any child bar as one blob."""
        _lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            _lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
        else:
            self.output.update(disp.HTML(self.html_code))

    def UpperCamelCase ( self, lowerCamelCase) -> Dict:
        """Append one row of `values` to the metrics table, growing the
        column set on the first data row when new keys appear."""
        if self.inner_table is None:
            _lowercase : Dict = [list(values.keys()), list(values.values())]
        else:
            _lowercase : Tuple = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(lowerCamelCase)
                _lowercase : str = columns
            self.inner_table.append([values[c] for c in columns])

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
        """Create and return a child progress bar displayed under the table."""
        _lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
        return self.child_bar

    def UpperCamelCase ( self) -> Union[str, Any]:
        """Detach the child bar and refresh the display."""
        _lowercase : Optional[Any] = None
        self.display()
class _lowerCamelCase( _a ):
    """Trainer callback showing progress bars and a live metrics table in
    Jupyter notebooks.

    NOTE(review): the handler parameters were collapsed to `lowerCamelCase`
    (duplicate parameter names are a SyntaxError) and assignment targets to
    `_lowercase`, while the bodies reference the intended names (`args`,
    `state`, `logs`, ...).  Code unchanged; comments only.
    """

    def __init__( self) -> List[Any]:
        # training_tracker / prediction_bar start unset; _force_next_update
        # forces a display refresh after slow phases such as evaluation.
        _lowercase : Union[str, Any] = None
        _lowercase : Dict = None
        _lowercase : Dict = False

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
        """on_train_begin: create the tracker with an Epoch/Step first column."""
        _lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        _lowercase : Dict = 0
        _lowercase : Tuple = 0
        _lowercase : int = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        _lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
        """on_step_end: advance the main bar with an `Epoch x/y` comment."""
        _lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        _lowercase : str = False

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
        """on_prediction_step: drive a child bar across the eval dataloader."""
        if not has_length(lowerCamelCase):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                _lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
            else:
                _lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
        """on_predict: close and drop the prediction child bar."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        _lowercase : Any = None

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
        """on_log: append the training-loss row when there is no evaluation."""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            _lowercase : Dict = {'Training Loss': logs['loss']}
            # First column is necessarily Step sine we're not in epoch eval strategy
            _lowercase : List[Any] = state.global_step
            self.training_tracker.write_line(lowerCamelCase)

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
        """on_evaluate: fold eval metrics into one table row, strip the
        bookkeeping keys, then force the next display refresh."""
        if self.training_tracker is not None:
            _lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            # Use the most recent logged training loss, if any.
            for log in reversed(state.log_history):
                if "loss" in log:
                    _lowercase : int = log['loss']
                    break
            if self.first_column == "Epoch":
                _lowercase : Union[str, Any] = int(state.epoch)
            else:
                _lowercase : Optional[Any] = state.global_step
            _lowercase : str = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    _lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
            # Drop timing/bookkeeping entries that should not appear as columns.
            _lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
            _lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
            _lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
            _lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
            _lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
            _lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    _lowercase : Union[str, Any] = v
                else:
                    # Prettify remaining metric names, e.g. eval_f1 -> "F1".
                    _lowercase : Optional[Any] = k.split('_')
                    _lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
                    _lowercase : Tuple = v
            self.training_tracker.write_line(lowerCamelCase)
            self.training_tracker.remove_child()
            _lowercase : str = None
            # Evaluation takes a long time so we should force the next update.
            _lowercase : Optional[Any] = True

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
        """on_train_end: final bar refresh and tracker teardown."""
        self.training_tracker.update(
            state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
        _lowercase : Any = None
| 89 | 0 |
from __future__ import annotations
SCREAMING_SNAKE_CASE :List[Any] = tuple[int, int, int]
SCREAMING_SNAKE_CASE :Tuple = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE :Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE :List[Any] = "EGZWVONAHDCLFQMSIPJBYUKXTR"
SCREAMING_SNAKE_CASE :int = "FOBHMDKEXQNRAULPGSJVTYICZW"
SCREAMING_SNAKE_CASE :Any = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
SCREAMING_SNAKE_CASE :Tuple = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE :List[Any] = "RMDJXFUWGISLHVTCQNKYPBEZOA"
SCREAMING_SNAKE_CASE :Dict = "SGLCPQWZHKXAREONTFBVIYJUDM"
SCREAMING_SNAKE_CASE :Dict = "HVSICLTYKQUBXDWAJZOMFGPREN"
SCREAMING_SNAKE_CASE :Union[str, Any] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
SCREAMING_SNAKE_CASE :int = "LFKIJODBEGAMQPXVUHYSTCZRWN"
SCREAMING_SNAKE_CASE :List[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def UpperCAmelCase ( rotpos , rotsel , pb ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor selection, rotor positions and the plugboard string.

    Returns the positions/selection unchanged plus the parsed plugboard
    mapping.

    Raises:
        Exception: if fewer than 3 unique rotors are supplied.
        ValueError: if any rotor position is outside 1..26.
    """
    # Fix: the three parameters were all declared as `a_` (a SyntaxError) and
    # the body referenced undefined collapsed names; distinct names restored.
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg )

    # Checks if rotor positions are valid.  The alphabet has 26 symbols; the
    # module-level alphabet constant's name was mangled, so the bound is a
    # literal here.
    rotorposa, rotorposb, rotorposc = rotpos
    if not 0 < rotorposa <= 26:
        msg = f"First rotor position is not within range of 1..26 ({rotorposa})"
        raise ValueError(msg )
    if not 0 < rotorposb <= 26:
        msg = f"Second rotor position is not within range of 1..26 ({rotorposb})"
        raise ValueError(msg )
    if not 0 < rotorposc <= 26:
        msg = f"Third rotor position is not within range of 1..26 ({rotorposc})"
        raise ValueError(msg )

    # Validates string and returns dict
    # NOTE(review): `_plugboard` is not defined under that name in this file
    # (the helper's name was mangled) -- confirm before running end-to-end.
    pbdict = _plugboard(pb )

    return rotpos, rotsel, pbdict
def UpperCAmelCase ( pbstring ) -> dict[str, str]:
    """Parse a plugboard definition string into a symmetric letter mapping.

    Example: ``"AB"`` -> ``{"A": "B", "B": "A"}``; the empty string yields an
    empty mapping.

    Raises:
        TypeError: if `pbstring` is not a string.
        Exception: if the length is odd, or a symbol is invalid or duplicated.
    """
    # Fix: the body referenced the undefined name `lowerCamelCase_`; it now
    # uses the declared parameter.
    if not isinstance(pbstring , str ):
        msg = f"Plugboard setting isn't type string ({type(pbstring )})"
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring )})"
        raise Exception(msg )
    elif pbstring == "":
        return {}

    # Fix: the result of `replace` was previously discarded, so spaces were
    # never actually stripped.
    pbstring = pbstring.replace(" " , "" )

    # NOTE(review): the module-level alphabet constant's name was mangled by
    # a renaming pass, so the 26-letter alphabet is inlined here.
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in alphabet:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg )
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def UpperCAmelCase ( text , rotor_position , rotor_selection = None , plugb = "" , ) -> str:
    """Encipher/decipher `text` with the Enigma emulation (the operation is
    symmetric: running the output through again with the same settings
    recovers the input).

    Fix: the four parameters were all declared as `a_` (a SyntaxError), the
    three rotor-position counters had been collapsed into one name, and the
    default rotor selection was evaluated at import time against undefined
    names; it is now resolved lazily so the module stays importable.

    NOTE(review): `abc`, `reflector`, the rotor constants and `_validator`
    are module-level names whose definitions were mangled by a renaming
    pass -- restore them before running end-to-end.
    """
    if rotor_selection is None:
        rotor_selection = (rotora, rotora, rotora)

    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )

    rotorposa, rotorposb, rotorposc = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # One-indexed positions -> zero-indexed offsets.
    rotorposa -= 1
    rotorposb -= 1
    rotorposc -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol ) + rotorposa
            symbol = rotor_a[index % len(abc )]

            # rotor rb --------------------------
            index = abc.index(symbol ) + rotorposb
            symbol = rotor_b[index % len(abc )]

            # rotor rc --------------------------
            index = abc.index(symbol ) + rotorposc
            symbol = rotor_c[index % len(abc )]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (reverse pass)
            symbol = abc[rotor_c.index(symbol ) - rotorposc]
            symbol = abc[rotor_b.index(symbol ) - rotorposb]
            symbol = abc[rotor_a.index(symbol ) - rotorposa]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer style)
            rotorposa += 1
            if rotorposa >= len(abc ):
                rotorposa = 0
                rotorposb += 1
            if rotorposb >= len(abc ):
                rotorposb = 0
                rotorposc += 1
            if rotorposc >= len(abc ):
                rotorposc = 0

        # Non-alphabet symbols pass through unchanged.
        result.append(symbol )

    return "".join(result )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = "This is my Python script that emulates the Enigma machine from WWII."
SCREAMING_SNAKE_CASE :Union[str, Any] = (1, 1, 1)
SCREAMING_SNAKE_CASE :List[str] = "pictures"
SCREAMING_SNAKE_CASE :Any = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE :Tuple = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 55 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a ``FocalNetConfig`` for the given checkpoint name.

    The architecture hyper-parameters (depths, focal levels/windows, embed dim)
    are inferred from substrings of *model_name* (tiny/small/base/large/xlarge/
    huge, srf/lrf, fl3/fl4), matching the original FocalNet release naming.

    Fixes vs. the mangled original: the parameter was named ``lowerCamelCase_``
    while the body read ``model_name`` (NameError), and the function itself is
    called as ``get_focalnet_config`` by the conversion routine below.
    """
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    # Conv patch embedding, post-layernorm and layerscale are only used by the
    # large/huge variants in the original release.
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    # large/huge checkpoints were trained on ImageNet-22k, the rest on 1k.
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # NOTE(review): upstream calls these kwargs `id2label`/`label2id`; the
    # mangled names are kept because the rest of this file reads
    # `model.config.idalabel` — align both together when cleaning up.
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, idalabel=idalabel, labelaid=labelaid, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def UpperCamelCase_(name):
    """Map an original FocalNet checkpoint key to its Hugging Face equivalent.

    Fix vs. the mangled original: the parameter was named ``lowerCamelCase_``
    while every statement in the body read ``name`` (NameError on first use).

    Args:
        name: a state-dict key from the original FocalNet checkpoint.

    Returns:
        The corresponding key in the HF ``FocalNetForImageClassification``
        state dict (classification head keys keep no ``focalnet.`` prefix).
    """
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    # The focal modulation sub-layers were renamed f/h/proj -> projection_*.
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        # classifier head lives outside the `focalnet.` backbone prefix
        name = name.replace('head' , 'classifier' )
    else:
        name = 'focalnet.' + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert it to HF format and verify it.

    Fixes vs. the mangled original: the def line declared three parameters all
    named ``lowerCamelCase_`` (a SyntaxError), and every local assignment target
    had been destroyed; names are restored from how each value is read below.
    The function is invoked as ``convert_focalnet_checkpoint`` by the CLI block
    at the bottom of this file.

    Args:
        model_name: one of the keys of ``model_name_to_url`` below.
        pytorch_dump_folder_path: optional output directory for the converted model.
        push_to_hub: if True, push model and processor to the HF Hub.
    """
    # fmt: off
    model_name_to_url = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ' , checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    # rename keys to the HF naming scheme
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # UpperCamelCase_ is the key-renaming helper defined above.
        state_dict[UpperCamelCase_(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion on a COCO test image
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True, size={'shortest_edge': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')
    # reference torchvision pipeline, used only to validate the processor output
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:' , model.config.idalabel[predicted_class_idx])
    print('First values of logits:' , outputs.logits[0, :3])
    # Expected logits are only recorded for the tiny/small/base variants;
    # skip the check (instead of crashing) for the larger models.
    expected_slice = None
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    if expected_slice is not None:
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''')
        model.push_to_hub(F'''{model_name}''')
        processor.push_to_hub(F'''{model_name}''')
if __name__ == "__main__":
    # CLI entry point for the FocalNet conversion script.
    # Fix vs. the mangled original: the parser and parsed args were assigned to
    # the throwaway name SCREAMING_SNAKE_CASE while the code below reads
    # `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this (CTRL) configuration module.
_UpperCamelCase : Any =logging.get_logger(__name__)
# NOTE(review): both module constants were mangled to the same name
# `_UpperCamelCase`, so this pretrained-config URL map immediately shadows the
# logger above. Upstream these are `logger` and
# `CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP` — TODO confirm and restore.
_UpperCamelCase : Any ={"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase__(PretrainedConfig):
    """Configuration class for the CTRL model (stores its hyper-parameters).

    Fixes vs. the mangled original: the base class was the undefined name
    ``_a`` (restored to ``PretrainedConfig``, imported at the top of this
    module); every ``__init__`` parameter was named ``A__`` (duplicate argument
    names are a SyntaxError); and the class/instance attribute names were
    destroyed. Names are restored from how the body reads each parameter and
    from standard ``PretrainedConfig`` conventions.
    """

    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=246534,        # size of the BPE vocabulary
        n_positions=256,          # maximum sequence length
        n_embd=1280,              # embedding / hidden dimension
        dff=8192,                 # feed-forward inner dimension
        n_layer=48,               # number of transformer layers
        n_head=16,                # attention heads per layer
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1E-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 206 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger — the DETA config class below calls `logger.info(...)`, so the
# mangled throwaway assignment (which was then shadowed by the URL map) is
# restored to the name actually read.
logger = logging.get_logger(__name__)

# Map of pretrained DETA checkpoints to their config URLs.
SCREAMING_SNAKE_CASE : Any = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase(PretrainedConfig):
    """Configuration class for the DETA detection model.

    Fixes vs. the mangled original: the base class was the undefined name
    ``_a`` (restored to ``PretrainedConfig``, imported above); every
    ``__init__`` parameter was named ``lowerCamelCase`` (duplicate argument
    names are a SyntaxError); assignment targets and method names were
    destroyed. Parameter names are restored from how the body reads each value;
    defaults keep the original positional values.
    """

    model_type = """deta"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ) -> Any:
        """Build the config; a ResNet backbone config is used when none is given."""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                # Re-hydrate a plain-dict backbone config into its config class.
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by ``attribute_map``."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by ``attribute_map``."""
        return self.d_model

    def to_dict(self) -> Union[str, Any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 89 | 0 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Return the surface area of a cube (6 * a**2).

    Restored name/parameter: the def was mangled to ``__snake_case`` with a
    parameter the body never used; the error message and the demo block below
    identify the intended name.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values' )
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Return the surface area of a cuboid: 2(lb + bh + lh).

    Raises:
        ValueError: if any dimension is negative.
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """Return the surface area of a sphere: 4 * pi * r**2.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values' )
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """Return the total surface area of a hemisphere (curved + flat): 3 * pi * r**2.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """Return the total surface area of a cone: pi * r * (r + slant_height).

    The slant height is derived from the radius and vertical height.

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Return the total surface area of a conical frustum.

    Formula: pi * (slant * (r1 + r2) + r1**2 + r2**2), with
    slant = sqrt(h**2 + (r1 - r2)**2).

    Restored names: the two radii had been collapsed into a single mangled
    parameter; the formula requires two distinct radii.

    Raises:
        ValueError: if any argument is negative.
    """
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """Return the total surface area of a cylinder: 2 * pi * r * (h + r).

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values' )
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Return the surface area of a (ring) torus: 4 * pi**2 * R * r.

    Restored: the mangled original called ``pow(lowerCamelCase_, 2)`` on an
    undefined name; the torus formula squares ``pi`` (imported at module top).

    Raises:
        ValueError: if either radius is negative, or if the torus would be a
            spindle/self-intersecting torus (R < r).
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Return the area of a rectangle: length * width.

    Raises:
        ValueError: if ``length`` or ``width`` is negative.
    """
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values' )
    return length * width
def area_square(side_length: float) -> float:
    """Return the area of a square: side_length**2.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values' )
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """Return the area of a triangle from base and height: (b * h) / 2.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values' )
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Return the area of a triangle from its three sides (Heron's formula).

    Restored names: all three sides had been collapsed into one mangled
    parameter; Heron's formula and the triangle-inequality check require three
    distinct sides.

    Raises:
        ValueError: if any side is negative, or the sides violate the triangle
            inequality.
    """
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
    # Heron: area = sqrt(s * (s - a) * (s - b) * (s - c)), s = half perimeter
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Return the area of a parallelogram: base * height.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values' )
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Return the area of a trapezium: (b1 + b2) * h / 2.

    Restored names: the two parallel bases had been collapsed into one mangled
    parameter.

    Raises:
        ValueError: if any argument is negative.
    """
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    """Return the area of a circle: pi * r**2.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values' )
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Return the area of an ellipse: pi * rx * ry.

    Raises:
        ValueError: if either semi-axis is negative.
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values' )
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Return the area of a rhombus from its diagonals: d1 * d2 / 2.

    Restored names: the two diagonals had been collapsed into one mangled
    parameter.

    Raises:
        ValueError: if either diagonal is negative.
    """
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """Return the area of a regular polygon with ``sides`` sides of ``length``.

    Formula: (n * a**2) / (4 * tan(pi / n)).

    Fixes vs. the mangled original: the ``isinstance`` check compared against
    an undefined name (restored to ``int`` per the error message), and a dead
    duplicate ``return`` statement after the first return has been removed.

    Raises:
        ValueError: if ``sides`` is not an integer >= 3, or ``length`` is
            negative.
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
    # Demo: run the doctests, then print the area / surface-area of a sample of
    # each supported shape using f-string debug formatting (`{expr = }`).
    import doctest
    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    print('''[DEMO] Areas of various geometric shapes: \n''')
    print(f'Rectangle: {area_rectangle(1_0, 2_0) = }')
    print(f'Square: {area_square(1_0) = }')
    print(f'Triangle: {area_triangle(1_0, 1_0) = }')
    print(f'Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }')
    print(f'Parallelogram: {area_parallelogram(1_0, 2_0) = }')
    print(f'Rhombus: {area_rhombus(1_0, 2_0) = }')
    print(f'Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }')
    print(f'Circle: {area_circle(2_0) = }')
    print(f'Ellipse: {area_ellipse(1_0, 2_0) = }')
    print('''\nSurface Areas of various geometric shapes: \n''')
    print(f'Cube: {surface_area_cube(2_0) = }')
    print(f'Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }')
    print(f'Sphere: {surface_area_sphere(2_0) = }')
    print(f'Hemisphere: {surface_area_hemisphere(2_0) = }')
    print(f'Cone: {surface_area_cone(1_0, 2_0) = }')
    print(f'Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }')
    print(f'Cylinder: {surface_area_cylinder(1_0, 2_0) = }')
    print(f'Torus: {surface_area_torus(2_0, 1_0) = }')
    print(f'Equilateral Triangle: {area_reg_polygon(3, 1_0) = }')
    print(f'Square: {area_reg_polygon(4, 1_0) = }')
    print(f'Reqular Pentagon: {area_reg_polygon(5, 1_0) = }')
| 580 |
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x) for each entry.

    Fix vs. the mangled original: the function was named ``UpperCamelCase_``
    while the ``__main__`` demo below calls ``relu`` (NameError); the wrong
    ``Optional[int]`` return annotation is corrected to ``np.ndarray``.

    Args:
        vector: array-like of numbers.

    Returns:
        numpy array with negative entries clamped to 0.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 89 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname) -> str:
    """Extract the pet-breed label from an image file name.

    File names look like ``<label>_<number>.jpg``; the label is everything
    before the final ``_<number>`` suffix.

    Fixes vs. the mangled original: the function was named ``__lowerCAmelCase``
    while the dataset class and training loop call ``extract_label``, and the
    parameter was named ``_UpperCamelCase`` while the body read ``fname``.
    """
    # Keep only the last path component before matching.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"""^(.*)_\d+\.jpg$""", stem).groups()[0]
class PetsDataset(Dataset):
    """Dataset of pet images whose labels are derived from the file names.

    Fixes vs. the mangled original: the class was named ``lowerCamelCase__``
    with the undefined base ``_a`` (the training loop instantiates
    ``PetsDataset`` and the file imports ``Dataset`` from torch); the three
    ``__init__`` parameters were all named ``__a`` (SyntaxError); and the
    instance-attribute assignment targets were destroyed (restored from the
    reads in ``__len__``/``__getitem__``).
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        """Store the image paths, optional transform and optional label mapping."""
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        """Return ``{"image": tensor-or-PIL, "label": str-or-int}`` for *idx*."""
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("""RGB""")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            # Map the string label to its integer class id.
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    """Fine-tune a timm ResNet classifier on the pets dataset with Accelerate.

    Fixes vs. the mangled original: the def line declared two parameters both
    named ``_UpperCamelCase`` (a SyntaxError) while the body read ``config``
    and ``args``; every local assignment target had been destroyed and is
    restored from how each value is read later. The function is invoked as
    ``training_function(config, args)`` by ``main`` below.

    Args:
        config: dict of hyper-parameters (lr, num_epochs, seed, batch_size, image_size).
        args: parsed CLI namespace (data_dir, checkpointing, tracking options).
    """
    # Initialize accelerator (optionally with experiment tracking).
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="""all""", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, """isdigit"""):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        # Run name derived from this script's file name — presumably __file__;
        # the original argument was mangled. TODO confirm.
        run = os.path.split(__file__)[-1].split(""".""")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(""".jpg""")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("""resnet50d""", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["""mean"""])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["""std"""])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("""epoch_""", """""")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("""step_""", """"""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["""label"""])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""label"""]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""")
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": 100 * eval_metric,
                    """train_loss""": total_loss.item() / len(train_dataloader),
                    """epoch""": epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    """Parse CLI arguments, build the hyper-parameter config and launch training.

    Fixes vs. the mangled original: the function was named ``__lowerCAmelCase``
    while the ``__main__`` guard calls ``main()``, and the parser/args/config
    assignment targets were destroyed (restored from the reads below).
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument("""--data_dir""", required=True, help="""The data folder on disk.""")
    parser.add_argument("""--fp16""", action="""store_true""", help="""If passed, will use FP16 training.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    parser.add_argument(
        """--checkpointing_steps""", type=str, default=None, help="""Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.""", )
    parser.add_argument(
        """--output_dir""", type=str, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""", )
    parser.add_argument(
        """--resume_from_checkpoint""", type=str, default=None, help="""If the training should continue from a checkpoint folder.""", )
    parser.add_argument(
        """--with_tracking""", action="""store_true""", help="""Whether to load in all available experiment trackers from the environment and use them for logging.""", )
    parser.add_argument(
        """--project_dir""", type=str, default="""logs""", help="""Location on where to store experiment tracking logs` and relevent project information""", )
    args = parser.parse_args()
    config = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 306 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    """Convert a TensorFlow T5 checkpoint into a PyTorch model dump.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the pre-trained T5 architecture.
        pytorch_dump_path: output directory for the PyTorch model.

    Note: the original definition repeated the same parameter name three
    times, which is a SyntaxError in Python; the names are restored from the
    argparse block below.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    SCREAMING_SNAKE_CASE.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    SCREAMING_SNAKE_CASE.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    SCREAMING_SNAKE_CASE.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    SCREAMING_SNAKE_CASE : Any = SCREAMING_SNAKE_CASE.parse_args()
    # The guard previously called an undefined `convert_tf_checkpoint_to_pytorch`.
    UpperCamelCase_(SCREAMING_SNAKE_CASE.tf_checkpoint_path, SCREAMING_SNAKE_CASE.config_file, SCREAMING_SNAKE_CASE.pytorch_dump_path)
| 89 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__(PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for `KandinskyVaaControlnetImgaImgPipeline` built from
    tiny dummy sub-models.

    The original obfuscated version inherited from the undefined name `_a`
    (the import at the top of the file provides `PipelineTesterMixin`), and
    every attribute/method was renamed to `lowercase_`/`snake_case`, shadowing
    each other and breaking the `self.*` references used in the bodies.  The
    names below are restored from those call sites.
    """

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
    batch_params = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet wired for combined image + hint conditioning."""
        torch.manual_seed(0 )
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        return UNetaDConditionModel(**model_kwargs )

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0 )
        return VQModel(**self.dummy_movq_kwargs )

    def get_dummy_components(self):
        """Assemble the dict of components the pipeline constructor expects."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1_000,
            'beta_schedule': 'linear',
            'beta_start': 0.00_085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        return {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy call kwargs for the pipeline (seeded RNGs)."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        # `np.uinta` did not exist; the intended dtype is uint8.
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        return {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }

    def test_kandinsky_controlnet_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class snake_case__(unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky 2.2 controlnet-depth
    img2img run compared against a reference image.
    """

    def tearDown(self):
        # Must be named `tearDown` (the obfuscated `snake_case` name meant
        # unittest never ran this cleanup); frees GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        # Normalize hint to [0, 1] and reshape HWC -> 1CHW.
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = 'A robot, 4k photo'
        # `torch.floataa` did not exist; the fp16 reference image implies float16.
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 496 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, computed iteratively.

    Preserved quirks of the original: returns 0 for ``n == 1`` or for
    non-integer input; otherwise F(n) with F(0)=0, F(1)=1 (so F(2)=1,
    F(7)=13, ...).  The original called ``isinstance(n, n)``, which raises
    TypeError for any int — the intended type check is against ``int``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci number containing ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 89 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger (the original bound it to `_lowercase` and then
# immediately clobbered it with the dict below, leaving the `logger` name
# used by the classes in this module undefined).
logger = logging.get_logger(__name__)

# Map of pretrained BLIP-2 checkpoint ids to their hosted config files.
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig ):
    """Configuration for the BLIP-2 vision encoder.

    The original obfuscated version named all three classes in this module
    ``SCREAMING_SNAKE_CASE_`` (each shadowing the previous), inherited from
    the undefined name ``_a`` (the file imports ``PretrainedConfig``), and
    repeated one parameter name in ``__init__`` (a SyntaxError).  Names are
    restored from the in-file references (``BlipaVisionConfig`` /
    ``BlipaQFormerConfig`` are used by ``BlipaConfig.__init__`` below).
    """

    model_type = """blip_2_vision_model"""

    def __init__(
        self ,
        hidden_size=14_08 ,
        intermediate_size=61_44 ,
        num_hidden_layers=39 ,
        num_attention_heads=16 ,
        image_size=2_24 ,
        patch_size=14 ,
        hidden_act="gelu" ,
        layer_norm_eps=0.0_0_0_0_1 ,
        attention_dropout=0.0 ,
        initializer_range=1e-10 ,
        qkv_bias=True ,
        **kwargs ,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )


class BlipaQFormerConfig(PretrainedConfig ):
    """Configuration for the BLIP-2 Q-Former (a BERT-like cross-attention model)."""

    model_type = """blip_2_qformer"""

    def __init__(
        self ,
        vocab_size=3_05_22 ,
        hidden_size=7_68 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=30_72 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=5_12 ,
        initializer_range=0.0_2 ,
        layer_norm_eps=1e-12 ,
        pad_token_id=0 ,
        position_embedding_type="absolute" ,
        cross_attention_frequency=2 ,
        encoder_hidden_size=14_08 ,
        **kwargs ,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )


class BlipaConfig(PretrainedConfig ):
    """Composite BLIP-2 configuration: vision encoder + Q-Former + language model."""

    model_type = """blip-2"""
    is_composition = True

    def __init__(self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        # Default to an OPT language model when the text config gives no type.
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over vision features, so its encoder
        # width must match the vision hidden size.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.0_2

    @classmethod
    def from_vision_qformer_text_configs(cls , vision_config , qformer_config , text_config , **kwargs ):
        """Alternate constructor from the three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict(self ):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias: this module-level name previously ended up bound
# to the last of the three (shadowed) class definitions.
SCREAMING_SNAKE_CASE_ = BlipaConfig
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: submodule name -> public names it provides.  The
# original bound every assignment to `SCREAMING_SNAKE_CASE`, clobbering the
# dict and leaving `_import_structure` (used below) undefined.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Install a lazy proxy so heavy submodules load on first attribute access;
    # the original discarded the `_LazyModule` instead of registering it.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase_() -> None:
    """Entry point for the `transformers-cli` tool: build the command parser,
    register every subcommand, then dispatch to the selected command's runner.

    The original bound every local to `UpperCAmelCase` (so `parser` was
    undefined and each assignment clobbered the last) and passed the
    undefined `lowerCamelCase_` to every `register_subcommand`.
    """
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()


if __name__ == "__main__":
    # The guard previously called an undefined `main()`.
    lowerCamelCase_()
| 323 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function.  The original assigned both constants to the same
# name, leaving REDUCED_PLANCK_CONSTANT / SPEED_OF_LIGHT (used below) undefined.
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1


def UpperCamelCase_( force: float , area: float , distance: float ) -> dict[str, float]:
    """Solve the Casimir force equation F = (ℏ·c·π²·A) / (240·d⁴) for the one unknown.

    Exactly one of ``force``, ``area``, ``distance`` must be 0; that quantity
    is computed from the other two and returned as a single-entry dict.
    (The original repeated one parameter name three times — a SyntaxError;
    the names are restored from the body.)

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger (the original bound it to `snake_case` and then
# immediately clobbered it with the archive map below).
logger = logging.get_logger(__name__)

# Map of pretrained BERT checkpoint ids to their hosted configuration files.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    """Configuration for a BERT model.

    The original obfuscated class inherited from the undefined name `_a`
    (the file imports ``PretrainedConfig``), repeated one parameter name in
    ``__init__`` (a SyntaxError), and bound each attribute to
    ``SCREAMING_SNAKE_CASE`` instead of ``self``.  Parameter names are
    restored from the right-hand sides of those assignments.
    """

    model_type = """bert"""

    def __init__(
        self ,
        vocab_size=3_0522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        pad_token_id=0 ,
        position_embedding_type="absolute" ,
        use_cache=True ,
        classifier_dropout=None ,
        **kwargs ,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    """ONNX export configuration for BERT models.

    Restored from the obfuscated version: the base was the undefined `_a`
    (the file imports ``OnnxConfig``), the class name collided with the
    config class above, and the ``inputs`` property — the name the
    ``OnnxConfig`` contract looks up — had been renamed to ``_A``.
    """

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice models carry an extra per-choice axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )


# Backward-compatible alias: this module-level name previously ended up bound
# to this (last-defined) class.
SCREAMING_SNAKE_CASE = BertOnnxConfig
| 62 |
def multiplicative_persistence(num: int) -> int:
    """Return the multiplicative persistence of ``num``: how many times the
    digits must be multiplied together before a single digit remains.

    Fixes vs. the obfuscated original: ``isinstance(num, num)`` raised
    TypeError for every int; each digit was converted with ``int(num)``
    instead of ``int(i)``; and the running product was never fed back
    (``str(num)`` instead of ``str(total)``), making the loop infinite.

    >>> multiplicative_persistence(217)
    2

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the additive persistence of ``num``: how many times the digits
    must be summed before a single digit remains.

    >>> additive_persistence(199)
    3

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    """Assemble a RAG checkpoint from a generator and a question encoder and
    save model + tokenizers under ``dest_dir``.

    Restored from the obfuscated version, which repeated one parameter name
    seven times (a SyntaxError) and was no longer named ``consolidate`` even
    though the ``__main__`` block below calls it by that name.
    """
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )

    # Sanity check.
    model_class.from_pretrained(dest_dir )

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 652 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( model_name ):
    """Build a ``DetrConfig`` for the given checkpoint name.

    Returns:
        (config, is_panoptic): the configured ``DetrConfig`` and whether the
        model is a panoptic-segmentation variant.

    Raises:
        ValueError: if ``model_name`` names neither a resnet-50 nor a
        resnet-101 backbone.
    """
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet50 or resnet101' )
    # The HF backbone config replaces the timm backbone.
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    # The original passed `model_name` as both the repo id and the filename
    # to `hf_hub_download`, and keyed the mapping with `int(model_name)`.
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    with open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) as f:
        idalabel = json.load(f )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config, is_panoptic
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowercase : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """Move the value stored under ``old`` to key ``new`` in ``state_dict`` (in place)."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused in_proj weight/bias into separate q/k/v projections (in place).

    PyTorch's MultiHeadAttention stores query/key/value as one fused matrix +
    bias; HF DETR uses separate ``q_proj``/``k_proj``/``v_proj`` of size 256 each.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (self-attention + cross-attention)
    for i in range(6):
        # self-attention input projection
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention input projection
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original facebookresearch/detr checkpoint to the HF format.

    Loads the original model from torch hub, renames/splits the weights,
    checks the HF model's outputs against the original on a test image, and
    optionally saves/pushes the converted model + image processor.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()

    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    img_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=img_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
import random
class Onepad:
    """Toy one-time-pad-style cipher: each character gets its own random key."""

    @staticmethod
    def encrypt(text):
        """Encrypt *text*; return ``(cipher, key)`` as parallel lists of ints."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            # fresh random key per character; cipher value is (code + k) * k
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        """Invert :meth:`encrypt`: recover the plaintext from cipher + key lists."""
        plain = []
        for i in range(len(cipher)):
            # (c - k^2) / k == ((code + k) * k - k^2) / k == code
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 454 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# File name under which scheduler configs are saved; read by the mixin below.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class _lowerCamelCase( _a ):
    # NOTE(review): this looks like an enumeration of five variants (base `_a` is
    # imported from elsewhere), but all members have been mangled to the same
    # identifier `lowercase_`, so only the last assignment (= 5) survives at
    # class-creation time — the original member names must be recovered upstream.
    lowercase_ : Any = 1
    lowercase_ : Dict = 2
    lowercase_ : Union[str, Any] = 3
    lowercase_ : Tuple = 4
    lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
    # Output container holding a single JAX array.
    # NOTE(review): the field name was mangled to `lowercase_`; presumably it was
    # something like `prev_sample` — confirm against the upstream source.
    lowercase_ : jnp.ndarray
class _lowerCamelCase:
    # Scheduler mixin: config loading/saving plus discovery of compatible
    # scheduler classes. NOTE(review): the four class attributes below were all
    # mangled to `lowercase_`, so only the last binding (True) survives; the
    # method names were likewise mangled (all `UpperCamelCase`) and several
    # signatures repeat the same parameter name, which is a SyntaxError —
    # original names must be restored from upstream before this can run.
    lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
    lowercase_ : str = ["""dtype"""]
    lowercase_ : Dict = []
    lowercase_ : int = True
    @classmethod
    def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
        """Load a scheduler config, instantiate the scheduler and, when the class
        is stateful (has ``create_state``/``has_state``), create its initial state.
        Returns ``(scheduler, state)`` plus unused kwargs when requested."""
        _lowercase , _lowercase : Optional[int] = cls.load_config(
            pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
        _lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
        if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
            _lowercase : List[Any] = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
        """Persist the scheduler configuration to a directory (and optionally push it)."""
        self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
    @property
    def UpperCamelCase ( self) -> Union[str, Any]:
        """List of scheduler classes this one is compatible with."""
        return self._get_compatibles()
    @classmethod
    def UpperCamelCase ( cls) -> Any:
        """Resolve the names in ``cls._compatibles`` (plus the class itself) to
        actual classes exported by the package's top-level module."""
        _lowercase : Any = list(set([cls.__name__] + cls._compatibles))
        _lowercase : Dict = importlib.import_module(__name__.split('.')[0])
        _lowercase : Any = [
            getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Broadcast ``x`` to ``shape`` by appending trailing singleton dims.

    ``x``'s existing dims must align with the *leading* dims of ``shape``.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class CommonSchedulerState:
    """Diffusion-schedule quantities shared by the Flax schedulers below.

    Field names are fixed by the keyword construction in :meth:`create` and by
    ``get_sqrt_alpha_prod`` reading ``state.alphas_cumprod``.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Derive betas/alphas/cumulative alphas from ``scheduler.config``."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t), broadcast to sample shape.

    Returns a pair of arrays shaped like ``original_samples``.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse samples: sqrt(a_bar) * x0 + sqrt(1 - a_bar) * noise."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """Compute v-prediction target: sqrt(a_bar) * noise - sqrt(1 - a_bar) * sample."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 89 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image, optionally
    conditioned on a text prompt (for model types that support it)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split call kwargs into (preprocess, forward, postprocess) parameter dicts."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption the image(s); see the base :class:`Pipeline` for batching semantics."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT conditions generation on [CLS] + prompt token ids
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 28 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of *nums*.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0

    :raises ValueError: if *nums* is empty
    """
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import machinery: the module's public names are declared here and only
# actually imported on first attribute access via `_LazyModule`.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: the modeling objects are simply not exported without it
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point of the `transformers-cli` tool: build the parser, register all
    subcommands, dispatch to the selected command's service and run it."""
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # no subcommand given: print usage and exit with an error status
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 89 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# These names are read by the tokenizer class attributes and save_vocabulary below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file (one token per line) into an ordered token -> index map."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization over a fixed vocabulary."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split *token* into the longest vocabulary pieces; unknown chars map to unk."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            # over-long words are replaced wholesale by the unknown token
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # shrink the candidate substring from the right until it is in-vocab
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # no match even for the single leading char: emit unk and advance one
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-segmentation followed by greedy WordPiece.

    The space and newline characters are stored in the vocab file as the special
    tokens ``</_>`` and ``</n>`` and swapped with the literal characters here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # map the literal space/newline characters onto their placeholder ids
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then WordPiece-split each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        # drop negative ids and the pad/eos/bos special ids before decoding
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary back to disk, restoring the </_> and </n> placeholders."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 206 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for OnnxStableDiffusionPipeline on a tiny checkpoint.

    Fixes applied to the mangled original: the base class ``_a`` was undefined
    (the mixin is imported above); the checkpoint attribute was bound under a
    different name than the ``self.hub_checkpoint`` the methods read; every
    method shared one name so only the last survived; locals (``pipe``,
    ``inputs``, ``image`` ...) were bound to throwaway names; scheduler
    replacements were discarded instead of assigned to ``pipe.scheduler``.
    Method names below were reconstructed — NOTE(review): confirm against the
    upstream diffusers test file.
    """

    # Read by every test below as ``self.hub_checkpoint``.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline kwargs for a 2-step tiny run."""
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        """Default scheduler reproduces the frozen reference slice."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        """Passing pre-computed prompt embeddings must match passing the prompt."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward with a text prompt
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = pipe.tokenizer(
            prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward with embeddings
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        """Pre-computed negative embeddings must match a negative prompt string."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward with prompt strings
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward with embeddings
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase(unittest.TestCase):
    """Nightly GPU integration tests for OnnxStableDiffusionPipeline.

    Fixes applied to the mangled original: the two providers/options
    properties were defined under a different name than the
    ``self.gpu_provider`` / ``self.gpu_options`` read by every test; all test
    methods shared one name; locals (``sd_pipe``, ``prompt``, ``output`` ...)
    were bound to throwaway names; callback bookkeeping never updated
    ``number_of_steps`` / ``has_been_called``. Method names were
    reconstructed — NOTE(review): confirm against the upstream test file.
    """

    @property
    def gpu_provider(self):
        """ORT execution-provider tuple used by all tests below."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        """ORT session options; memory-pattern optimization disabled."""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        """The per-step callback is invoked once per step and sees sane latents."""
        number_of_steps = 0

        def test_callback_fn(step, timestep, latents) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator,
            callback=test_callback_fn, callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    """Builds tiny Falcon configs/inputs and runs per-model forward checks.

    Fixes applied to the mangled original: three classes in this module shared
    one name (the test suite below references ``FalconModelTester`` directly,
    so that name is restored here); ``__init__`` bound every argument to a
    throwaway local instead of the ``self.*`` attributes the other methods
    read; method names are restored from the call sites in the test suite;
    locals throughout were bound to throwaway names.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a tiny config, as one flat tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny FalconConfig matching the tester's dimensions."""
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the original mangled these two values; False/True
            # are the conventional settings — confirm against upstream.
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward pass with and without attention mask; checks output shape."""
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Decoder-with-cross-attention forward passes; checks output shape."""
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Causal-LM forward pass with labels; checks logits shape."""
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Cached decoding must match uncached decoding on appended tokens."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """(config, inputs_dict) pair consumed by the common-test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the Falcon model family.

    Fixes applied to the mangled original: the three ``_a`` base classes were
    undefined (the mixins are imported above); all five class attributes and
    all methods shared a single name so only the last binding survived; locals
    were bound to throwaway names. Attribute names follow the mixin contracts;
    test-method names were reconstructed — NOTE(review): confirm against the
    upstream transformers test file.
    """

    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        """Model forward must work both with and without ALiBi."""
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_cache_conversions(self):
        """Round-trip between Falcon's RW cache layout and the standard one."""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(standard_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        """Falcon may use fewer KV heads than query heads; check cache shapes."""
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    """Slow end-to-end generation checks against small Falcon checkpoints.

    Fixes applied to the mangled original: locals (``tokenizer``, ``model``,
    ``inputs`` ...) were bound to throwaway names and every undefined keyword
    value is restored; all methods shared one name. Method names were
    reconstructed — NOTE(review): confirm against the upstream test file.
    """

    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 580 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import wiring for the PoolFormer package: heavy submodules are only
# imported on attribute access via transformers' _LazyModule.
#
# Fixes applied to the mangled original: the structure dict and its optional
# additions were bound to throwaway names, leaving ``_import_structure``
# (consumed at the bottom) undefined, and the _LazyModule instance was bound
# to a variable instead of being installed into ``sys.modules``.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn Bunch-style mapping into (features, targets).

    >>> data_handling({"data": "[5.0, 3.5, 1.3, 0.3, 0.5]", "target": ([0])})
    ('[5.0, 3.5, 1.3, 0.3, 0.5]', [0])
    """
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Train an XGBoost regressor and return column-vector predictions for *test_features*."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """Fetch the California-housing dataset, train, and report MAE / MSE."""
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 306 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Collect every tracked source path and reject names that break conventions.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit non-zero with the number of offending files so CI fails.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 89 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Names of the on-disk vocabulary artifacts used by the tokenizer below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries.

    Returns (vocab, raw_vocab, ids_to_tokens, emoji):
      - vocab: surface form -> id (several forms may share one id)
      - raw_vocab: original comma-joined vocabulary line -> id
      - ids_to_tokens: id -> list of surface forms on that line
      - emoji: the parsed emoji JSON mapping
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A line is either one token or a comma-separated list of surface forms
    # sharing a single id; a line that is just "," stays a single token.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class snake_case__(_a):
    """GPT-NeoX-Japanese tokenizer.

    Wraps :class:`SubWordJapaneseTokenizer` behind the standard
    ``PreTrainedTokenizer`` interface: vocabulary files are a comma-separated
    ``vocab.txt`` plus an ``emoji.json`` mapping (see ``load_vocab_and_emoji``).
    """

    # Standard PreTrainedTokenizer class hooks.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # One entry per original vocabulary line (several surface forms may
        # share one id), so raw_vocab is the true vocabulary size.
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) back to a token (str)."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of tokens into a single, stripped string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, each turn terminated by EOS,
        keeping only the most recent ``model_max_length`` ids."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.txt (one comma-joined line per id) and emoji.json."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Subword tokenizer used by GPT-NeoX-Japanese.

    Tokenizes text against a vocabulary of multi-character surface forms,
    optionally normalizing URLs/e-mails/phone numbers/dates/prices to special
    tags first (``clean_text``).  Characters not covered by the vocabulary are
    emitted as ``<|byteN|>`` byte tokens, box-drawing symbols as ``<BLOCK>``.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest surface form bounds the lookahead in tokenize().
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # URL / e-mail / phone / date / era-date / price patterns, applied in
        # this order by clean_text().
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        # Box-drawing and block-element characters are all mapped to <BLOCK>.
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace volatile content (URLs, e-mails, …) with stable tags."""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        # Collapse runs of <BLOCK> tags into a single one.
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        """Tokenize *text* into a list of vocabulary surface forms and tags."""
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single characters whose UTF-8 encoding is 2 bytes and
            # falls in selected symbol ranges.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC_2_A_1 and c <= 0xC_2_B_F)
                    or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3)
                    or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F)
                    or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for single characters in the U+2000..U+2BFF range
            # (3-byte UTF-8 encodings).
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Tags ("<...>") may be up to maxlen+1 long; plain text is matched
            # with a lookahead of at most 3 characters.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        # A tag match is taken immediately and exclusively.
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    # Fall back to raw byte tokens.
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Convert one id back to text, decoding pending byte tokens."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 496 |
import contextlib
import csv
import json
import os
import sqlite3  # stdlib module (the obfuscated "sqlitea" does not exist)
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    """Session fixture: a small in-memory Dataset with nested features."""
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    """Session fixture: the dataset above materialized to an .arrow cache file."""
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    """FILE_CONTENT written as a plain text file."""
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    """FILE_CONTENT compressed with bzip2."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    """FILE_CONTENT compressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    """FILE_CONTENT compressed with lz4 (only when lz4 is installed)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    """text_file archived as .7z (only when py7zr is installed)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    """text_file archived as an uncompressed tar."""
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    """FILE_CONTENT compressed with xz/lzma."""
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    """text_file archived as a zip."""
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    """FILE_CONTENT compressed with zstandard (only when installed)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    """A small TMX translation-memory XML file."""
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


# Shared tabular test data used by the file-format fixtures below.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}

DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]

DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    """DATA_DICT_OF_LISTS materialized to an .arrow cache file."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    """DATA written to a SQLite database with a single `dataset` table."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    """DATA written as a CSV file with a header row."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    """Second copy of DATA as CSV, for multi-file loading tests."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    """csv_path recompressed with bzip2."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    """Both CSV files zipped at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    """Both CSV files zipped with uppercase .CSV extensions."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    """Both CSV files zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    """DATA written as a Parquet file with an explicit schema."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    """{"data": DATA} as a JSON file (list-of-dicts layout)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    """{"data": DATA_DICT_OF_LISTS} as a JSON file (dict-of-lists layout)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    """DATA as newline-delimited JSON."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    """Second copy of DATA as JSON lines."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    """DATA_312 (shuffled key order) as JSON lines."""
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    """DATA_STR (string col_1 values) as JSON lines."""
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    """text_path recompressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    """jsonl_path recompressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Both jsonl files zipped at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """zip_jsonl_path nested one level deep inside another zip."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Both jsonl files zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Both jsonl files archived as a tar."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """tar_jsonl_path nested one level deep inside another tar."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
    """Return the repo-relative path of the 44.1 kHz WAV test file used by the audio fixtures."""
    parts = ('tests', 'features', 'data', 'test_audio_44100.wav')
    return os.path.join(*parts )
@pytest.fixture(scope='session' )
def UpperCamelCase_( image_file , tmp_path_factory ) -> Any:
    """Fixture body: zip the same image twice (second copy renamed *2.jpg) and return the path.

    Fix: both parameters shared the name `lowerCamelCase_` (a SyntaxError) and the
    local `path` was never defined.
    """
    path = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('.jpg' , '2.jpg' ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ) -> Optional[Any]:
    """Fixture body: build a data dir with visible and hidden files/subdirs for glob tests.

    Fix: the parameter was named `lowerCamelCase_` while the body referenced
    `tmp_path_factory` and the undefined local `data_dir`.
    """
    data_dir = tmp_path_factory.mktemp('data_dir' )
    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    return data_dir
| 89 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowercase : str ="Create a default config file for Accelerate with only a few flags set."
def A__ ( mixed_precision: str = "no", save_location: str = default_json_config_file, use_xpu: bool = False ) -> Tuple:
    """Write a minimal default Accelerate cluster config to `save_location`.

    Fix: all three parameters shared the name `lowercase` (a SyntaxError), and every
    local (`path`, `config`, device counts) plus the config-dict key writes were lost.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8".
        save_location: target JSON path; parents are created as needed.
        use_xpu: opt-in flag for Intel XPU detection.

    Returns:
        The written Path, or False if a config already exists at `save_location`.
    """
    path = Path(save_location )
    path.parent.mkdir(parents=True, exist_ok=True )
    if path.exists():
        print(
            F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        # CPU-only fallback: single process, no distributed setup.
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path

# Alias: two later defs in this file rebind `A__`, and the CLI handler below calls
# `write_basic_config`, so expose the stable name here.
write_basic_config = A__
def A__ ( parser, parents ) -> List[Any]:
    """Register the `accelerate config default` sub-command on `parser`.

    Fix: both parameters shared the name `lowercase` (a SyntaxError) and the body
    referenced the undefined names `parser` and `lowerCamelCase_`.
    """
    # `_lowercase` is the module-level sub-command description string.
    subparser = parser.add_parser('default', parents=parents, help=_lowercase, formatter_class=SubcommandHelpFormatter )
    subparser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), dest='save_location', )
    subparser.add_argument(
        '--mixed_precision', choices=['no', 'fp16', 'bf16'], type=str, help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.', default='no', )
    # NOTE(review): by the time this runs, the name `A__` is bound to the last
    # definition in this module (the command handler) — presumably the intended target.
    subparser.set_defaults(func=A__ )
    return subparser
def A__ ( args ) -> int:
    """CLI handler: write the default config and report where it was saved.

    Fix: the parameter was named `lowercase` while the body read `args`, and the
    result was bound to `A` while the guard checked `config_file`.
    """
    config_file = write_basic_config(args.mixed_precision, args.save_location )
    if config_file:
        print(F'accelerate configuration saved at {config_file}' )
| 305 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the GPT-BigCode model.
# Fix: the structure dict and the model list were both bound to throwaway names
# (the list even overwrote the dict), `_import_structure` was referenced but never
# defined, and the _LazyModule was assigned to a variable instead of being
# installed in sys.modules — so lazy loading never worked.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCamelCase_(config , base_model=False ) -> list:
    """Build (old_key, new_key) pairs mapping an MSN checkpoint onto HF ViT names.

    Fix: both parameters shared the name `lowerCamelCase_` (a SyntaxError) and the
    accumulator `rename_keys` plus `config`/`base_model` were undefined.

    Args:
        config: model config; only `num_hidden_layers` is read here.
        base_model: when True, strip the leading "vit." prefix and map the final
            layernorm instead of a classification head.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def lowerCamelCase_(state_dict , config , base_model=False ) -> None:
    """Split each layer's fused qkv weight/bias into separate query/key/value entries.

    Fix: parameters shared one name (SyntaxError) and every state-dict key write was
    lost (all results were bound to a throwaway local instead of `state_dict[...]`).

    NOTE(review): target key layout follows the standard HF ViT naming
    `{prefix}encoder.layer.{i}.attention.attention.{query,key,value}` — confirm
    against ViTMSNModel's actual state dict before shipping.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_(state_dict ) -> None:
    """Drop the classification-head weights from `state_dict` in place (ignore if absent).

    Fix: `ignore_keys` was undefined and `pop` lost its `None` default.
    """
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCamelCase_(state_dict ) -> None:
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    # Fix: `ignore_keys` was undefined and `pop` lost its `None` default.
    ignore_keys = [
        'module.fc.fc1.weight',
        'module.fc.fc1.bias',
        'module.fc.bn1.weight',
        'module.fc.bn1.bias',
        'module.fc.bn1.running_mean',
        'module.fc.bn1.running_var',
        'module.fc.bn1.num_batches_tracked',
        'module.fc.fc2.weight',
        'module.fc.fc2.bias',
        'module.fc.bn2.weight',
        'module.fc.bn2.bias',
        'module.fc.bn2.running_mean',
        'module.fc.bn2.running_var',
        'module.fc.bn2.num_batches_tracked',
        'module.fc.fc3.weight',
        'module.fc.fc3.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCamelCase_(dct , old , new ) -> None:
    """Move `dct[old]` to `dct[new]` in place.

    Fix: parameters shared one name (SyntaxError) and the write-back
    `dct[new] = val` was lost.
    """
    val = dct.pop(old )
    dct[new] = val
def lowerCamelCase_(checkpoint_url , pytorch_dump_folder_path ) -> None:
    """Download an MSN checkpoint, convert it to a ViTMSNModel, sanity-check the
    outputs against known slices, and save model + processor.

    Fix: both parameters shared one name (SyntaxError); every config attribute
    write and local binding was lost to throwaway names.
    """
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # Architecture variants are encoded in the checkpoint URL.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )['target_encoder']
    image_processor = ViTImageProcessor(size=config.image_size )
    # NOTE(review): the helper names below match the upstream conversion script;
    # in this file the helpers were all renamed to `lowerCamelCase_` — restore
    # their real names (or add aliases) so these calls resolve.
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )

# Alias so the __main__ guard's call to `convert_vit_msn_checkpoint` resolves.
convert_vit_msn_checkpoint = lowerCamelCase_
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to `__lowerCamelCase` while the
    # code below referenced the undefined names `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
    """Builds tiny random XLM configs/inputs and shape-checks every XLM task head.

    Fix: most methods reused the name `lowerCamelCase` for every parameter (a
    SyntaxError), and every method was named `UpperCamelCase` even though the
    test class below calls `prepare_config_and_inputs`, `create_and_check_xlm_model`,
    etc.; the names the call sites use are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create random inputs plus a config; returns a 9-tuple consumed by the checks."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict


# Alias: the test class below instantiates `XLMModelTester(self)`, and the bare
# name `_lowerCamelCase` is rebound by later class definitions in this file.
XLMModelTester = _lowerCamelCase
@require_torch
class _lowerCamelCase(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common-test harness for all XLM heads.

    Fix: the mixin bases were the undefined name `_a`; the mixin-required class
    attributes (`all_model_classes`, `all_generative_model_classes`,
    `pipeline_model_mapping`) were bound to `lowercase_`; several methods reused
    one parameter name (a SyntaxError); and unittest-discoverable `test_*` /
    mixin-hook method names were lost.
    """

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _lowerCamelCase(unittest.TestCase ):
    """Integration test: greedy generation with the pretrained xlm-mlm-en-2048 checkpoint.

    Fix: the method was not named `test_*` (so unittest never discovered it), the
    device was the undefined name `lowerCamelCase`, and the locals/`do_sample`
    argument were lost.
    """

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89 | 0 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE ( nn.Module ):
    '''Basic transformer block: self-attention, optional cross-attention, and a
    feed-forward network, each preceded by a (possibly adaptive) layer norm.

    NOTE(review): this block appears machine-mangled. The ``def`` lines repeat the
    parameter name ``UpperCAmelCase_`` (duplicate argument names are a SyntaxError
    in Python), and the repeated assignments to the throwaway local
    ``SCREAMING_SNAKE_CASE`` were presumably ``self.*`` attribute writes upstream:
    ``self.use_ada_layer_norm``, ``self.norma``, ``self.attna`` and ``self.ff`` are
    read below but never assigned here. The body does reference the real upstream
    parameter names (``only_cross_attention``, ``num_embeds_ada_norm``,
    ``norm_type``, ``cross_attention_dim``, ``double_self_attention``), which are
    likewise unbound given the mangled signature. Reconstruct against the upstream
    implementation before relying on this code.
    '''
    def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : Union[str, Any] = "geglu" , UpperCAmelCase_ : int = None , UpperCAmelCase_ : Any = False , UpperCAmelCase_ : str = False , UpperCAmelCase_ : Union[str, Any] = False , UpperCAmelCase_ : Union[str, Any] = False , UpperCAmelCase_ : int = True , UpperCAmelCase_ : List[str] = "layer_norm" , UpperCAmelCase_ : Optional[int] = False , ):
        super().__init__()
        SCREAMING_SNAKE_CASE : Union[str, Any] = only_cross_attention
        # Adaptive-norm variants are only valid when an embedding count is given.
        SCREAMING_SNAKE_CASE : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
        SCREAMING_SNAKE_CASE : List[str] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            SCREAMING_SNAKE_CASE : Union[str, Any] = AdaLayerNorm(UpperCAmelCase_ , UpperCAmelCase_ )
        elif self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : List[str] = AdaLayerNormZero(UpperCAmelCase_ , UpperCAmelCase_ )
        else:
            SCREAMING_SNAKE_CASE : Tuple = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = Attention(
            query_dim=UpperCAmelCase_ , heads=UpperCAmelCase_ , dim_head=UpperCAmelCase_ , dropout=UpperCAmelCase_ , bias=UpperCAmelCase_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCAmelCase_ , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            SCREAMING_SNAKE_CASE : Dict = (
                AdaLayerNorm(UpperCAmelCase_ , UpperCAmelCase_ )
                if self.use_ada_layer_norm
                else nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
            )
            SCREAMING_SNAKE_CASE : str = Attention(
                query_dim=UpperCAmelCase_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCAmelCase_ , dim_head=UpperCAmelCase_ , dropout=UpperCAmelCase_ , bias=UpperCAmelCase_ , upcast_attention=UpperCAmelCase_ , )  # is self-attn if encoder_hidden_states is none
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = None
            SCREAMING_SNAKE_CASE : List[Any] = None
        # 3. Feed-forward
        SCREAMING_SNAKE_CASE : Dict = nn.LayerNorm(UpperCAmelCase_ , elementwise_affine=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Dict = FeedForward(UpperCAmelCase_ , dropout=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , final_dropout=UpperCAmelCase_ )
        # let chunk size default to None
        SCREAMING_SNAKE_CASE : Union[str, Any] = None
        SCREAMING_SNAKE_CASE : str = 0
    def _A ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
        '''Configure chunked feed-forward execution (chunk size and chunk dim).

        NOTE(review): the two writes below were presumably ``self._chunk_size`` /
        ``self._chunk_dim`` upstream -- both attributes are read in the forward
        pass but never assigned here.
        '''
        SCREAMING_SNAKE_CASE : Any = chunk_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = dim
    def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : Tuple = None , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Union[str, Any] = None , ):
        '''Forward pass: self-attention -> (optional) cross-attention -> feed-forward.

        NOTE(review): the multi-value unpacks from the AdaLayerNormZero branch were
        collapsed into single assignments -- ``gate_msa``, ``shift_mlp``,
        ``scale_mlp`` and ``gate_mlp`` are read below but never bound.
        '''
        if self.use_ada_layer_norm:
            SCREAMING_SNAKE_CASE : List[str] = self.norma(UpperCAmelCase_ , UpperCAmelCase_ )
        elif self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : Any = self.norma(
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hidden_dtype=hidden_states.dtype )
        else:
            SCREAMING_SNAKE_CASE : List[Any] = self.norma(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        SCREAMING_SNAKE_CASE : Any = self.attna(
            UpperCAmelCase_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
        if self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
        SCREAMING_SNAKE_CASE : List[Any] = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attna is not None:
            SCREAMING_SNAKE_CASE : List[Any] = (
                self.norma(UpperCAmelCase_ , UpperCAmelCase_ ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase_ )
            )
            SCREAMING_SNAKE_CASE : Any = self.attna(
                UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
            SCREAMING_SNAKE_CASE : Optional[int] = attn_output + hidden_states
        # 3. Feed-forward
        SCREAMING_SNAKE_CASE : List[str] = self.norma(UpperCAmelCase_ )
        if self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            SCREAMING_SNAKE_CASE : Optional[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(
                [self.ff(UpperCAmelCase_ ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.ff(UpperCAmelCase_ )
        if self.use_ada_layer_norm_zero:
            SCREAMING_SNAKE_CASE : List[str] = gate_mlp.unsqueeze(1 ) * ff_output
        SCREAMING_SNAKE_CASE : Tuple = ff_output + hidden_states
        return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Feed-forward block: activation projection -> dropout -> linear [-> final dropout].

    Parameters (matching the keyword names used by the transformer block above):
        dim: input feature dimension.
        dim_out: output dimension; defaults to ``dim``.
        mult: inner-dimension multiplier (inner = dim * mult).
        dropout: dropout probability applied after the activation and optionally at the end.
        activation_fn: one of "gelu", "gelu-approximate", "geglu", "geglu-approximate".
        final_dropout: whether to append a trailing dropout (ViT / MLP-Mixer style).

    Fixes over the mangled original: the ``__init__`` signature repeated the
    parameter name ``UpperCAmelCase_`` (a SyntaxError), and the forward pass
    assigned each module's output to a throwaway local and returned the
    undefined name ``hidden_states``.
    """

    def __init__( self , dim , dim_out=None , mult=4 , dropout=0.0 , activation_fn="geglu" , final_dropout=False ):
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        # NOTE(review): GELU / GEGLU / ApproximateGELU are the intended helper
        # classes defined below in this file, whose `class` names were mangled
        # to SCREAMING_SNAKE_CASE -- these references are unresolved as-is.
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate="tanh" )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )

    def _A ( self , hidden_states ):
        """Thread the input through every sub-module in order."""
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """GELU activation preceded by a linear projection.

    Fixes over the mangled original: the ``__init__`` signature repeated the
    parameter name ``UpperCAmelCase_`` (a SyntaxError); both methods were named
    ``_A`` so the ``self.gelu`` call could never resolve; the forward pass
    applied the activation to the raw input instead of the projection and
    returned the undefined name ``hidden_states``; ``torch.floataa`` is a
    mangled ``torch.float32``.
    """

    def __init__( self , dim_in , dim_out , approximate="none" ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        # Passed straight through to F.gelu ("none" or "tanh").
        self.approximate = approximate

    def gelu( self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )

    def _A ( self , hidden_states ):
        """Project then apply GELU."""
        hidden_states = self.proj(hidden_states )
        return self.gelu(hidden_states )

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """GEGLU gated activation (https://arxiv.org/abs/2002.05202): a linear
    projection to 2x the width, split into a value half and a gate half,
    combined as ``value * GELU(gate)``.

    Fixes over the mangled original: duplicate ``UpperCAmelCase_`` parameter
    names (a SyntaxError), both methods named ``_A`` so ``self.gelu`` could not
    resolve, and a lost two-way unpack of ``chunk`` -- the forward pass
    multiplied the undefined name ``hidden_states`` by GELU of the raw input.
    """

    def __init__( self , dim_in , dim_out ):
        super().__init__()
        # 2x width: one half is the value path, the other the gate.
        self.proj = nn.Linear(dim_in , dim_out * 2 )

    def gelu( self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )

    def _A ( self , hidden_states ):
        """Project, split into (value, gate) halves, return value * GELU(gate)."""
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Sigmoid-based approximation of GELU: ``x * sigmoid(1.702 * x)``
    (https://arxiv.org/abs/1606.08415, section 2), after a linear projection.

    Fixes over the mangled original: duplicate ``UpperCAmelCase_`` parameter
    names (a SyntaxError), and a forward pass that discarded the projection
    result and then used the undefined name ``x``.
    """

    def __init__( self , dim_in , dim_out ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )

    def _A ( self , x ):
        """Project then apply the 1.702-sigmoid GELU approximation."""
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Adaptive LayerNorm conditioned on a timestep embedding: a per-timestep
    (scale, shift) pair modulates an affine-free LayerNorm.

    Fixes over the mangled original: duplicate ``UpperCAmelCase_`` parameter
    names (a SyntaxError; the body's reference to ``embedding_dim`` shows the
    intended name), and a lost two-way unpack of ``torch.chunk`` -- ``scale``
    and ``shift`` were read but never bound.
    """

    def __init__( self , embedding_dim , num_embeddings ):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        # Produces the concatenated (scale, shift) modulation vector.
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        # Affine-free: scale/shift come from the conditioning instead.
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )

    def _A ( self , x , timestep ):
        """Normalize ``x`` and modulate it with the embedding of ``timestep``."""
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """AdaLayerNorm-Zero: LayerNorm whose modulation (shift/scale/gates for the
    attention and MLP paths) comes from a combined timestep + class-label
    embedding. Returns the normalized input plus the remaining modulation chunks.

    Fixes over the mangled original: duplicate ``UpperCAmelCase_`` parameter
    names (a SyntaxError) and a lost six-way unpack of ``emb.chunk`` -- the
    shift/scale/gate names were read but never bound.
    """

    def __init__( self , embedding_dim , num_embeddings ):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        # Six modulation chunks: shift/scale/gate for MSA and for the MLP.
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1E-6 )

    def _A ( self , x , timestep , class_labels , hidden_dtype=None ):
        """Normalize ``x`` with the MSA shift/scale; hand back the other chunks."""
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """Adaptive GroupNorm: an affine-free group norm modulated by a (scale,
    shift) pair derived from a conditioning embedding.

    Fixes over the mangled original: duplicate ``UpperCAmelCase_`` parameter
    names (a SyntaxError), and lost assignments/unpacks in the forward pass --
    the activated embedding and the (scale, shift) chunks were discarded into a
    throwaway local while ``emb``, ``scale`` and ``shift`` were read unbound.
    """

    def __init__( self , embedding_dim , out_dim , num_groups , act_fn=None , eps=1E-5 ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            # NOTE(review): `get_activation` is the helper imported at the top
            # of this file from .activations.
            self.act = get_activation(act_fn )
        # Produces the concatenated (scale, shift) modulation, one per channel.
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )

    def _A ( self , x , emb ):
        """Group-normalize ``x`` (NCHW) and modulate it with ``emb``."""
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        # Broadcast the per-channel modulation over the spatial dims.
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x

    # Alias so nn.Module.__call__ dispatches to the implementation above.
    forward = _A
| 62 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
# Fail fast if the installed `datasets` package is too old for this example script.
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Module-level logger. NOTE(review): mangled name -- the training function below
# reads `logger`, which this assignment (to SCREAMING_SNAKE_CASE) never defines.
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
    """Arguments describing the data to train/evaluate on (TabFact-style).

    NOTE(review): mangled code -- every field below is named ``lowercase_``, so
    only the last declaration survives on the dataclass; the metadata strings
    show the intended upstream fields (dataset_name, dataset_config_name,
    max_seq_length, overwrite_cache, pad_to_max_length, max_train_samples,
    max_eval_samples, max_predict_samples, train_file, validation_file,
    test_file), which the validation method below still reads via ``self.``.
    ``_a`` is presumably a mangled sentinel (None/False) defined earlier in this
    file -- TODO confirm.
    """
    lowercase_ : Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    lowercase_ : Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
    lowercase_ : int = field(
        default=10_24, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    lowercase_ : bool = field(
        default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    lowercase_ : bool = field(
        default=_a, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    lowercase_ : Optional[int] = field(
        default=_a, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    lowercase_ : Optional[int] = field(
        default=_a, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    lowercase_ : Optional[int] = field(
        default=_a, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
    lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )
    def UpperCamelCase ( self) -> Dict:
        """Validate the argument combination: either a dataset name or both a
        training and a validation file with matching csv/json extensions.

        NOTE(review): the two ``_lowercase`` assignments below were presumably
        ``train_extension`` / ``validation_extension`` upstream -- both names
        are read by the asserts but never bound here.
        """
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            _lowercase : int = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            _lowercase : Tuple = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
    """Arguments describing which model/config/tokenizer to fine-tune.

    NOTE(review): mangled code -- every field is named ``lowercase_`` (only the
    last survives) and ``_a`` is presumably a mangled sentinel (None/False)
    defined earlier in this file; the metadata strings show the intended
    upstream fields (model_name_or_path, config_name, tokenizer_name,
    cache_dir, use_fast_tokenizer, model_revision, use_auth_token).
    """
    lowercase_ : str = field(
        default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    lowercase_ : bool = field(
        default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    lowercase_ : str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    lowercase_ : bool = field(
        default=_a, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
def UpperCamelCase_( ) -> Optional[int]:
    """End-to-end TabFact fine-tuning entry point: parse arguments, set up
    logging, load the dataset, tokenize with TAPEX, then train / evaluate /
    predict with a BART sequence classifier.

    NOTE(review): mangled code -- nearly every local is bound to the throwaway
    name ``_lowercase``, so the tuple unpacks below are invalid Python
    (annotated tuple targets are a SyntaxError) and later reads of
    ``model_args`` / ``data_args`` / ``training_args`` / ``raw_datasets`` /
    ``tokenizer`` / ``model`` / ``trainer`` etc. refer to names that are never
    assigned in this function. Restore the upstream variable names before
    running.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    _lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _lowercase , _lowercase , _lowercase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _lowercase , _lowercase , _lowercase : Union[str, Any] = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    _lowercase : Union[str, Any] = training_args.get_process_log_level()
    logger.setLevel(lowerCamelCase_ )
    datasets.utils.logging.set_verbosity(lowerCamelCase_ )
    transformers.utils.logging.set_verbosity(lowerCamelCase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    _lowercase : Optional[int] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        _lowercase : Dict = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        _lowercase : Dict = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        _lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                _lowercase : Tuple = data_args.train_file.split('.' )[-1]
                _lowercase : int = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                _lowercase : Any = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
        for key in data_files.keys():
            logger.info(F'''load a local file for {key}: {data_files[key]}''' )
        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            _lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            _lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    _lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
    _lowercase : Any = len(lowerCamelCase_ )
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _lowercase : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    _lowercase : str = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
    _lowercase : Tuple = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        _lowercase : int = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        _lowercase : str = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    _lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
    _lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
            F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    _lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(lowerCamelCase_ ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(lowerCamelCase_ ):
            # Rows are newline-separated and cells are '#'-separated; first row is the header.
            _lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        _lowercase : List[Any] = examples['statement']
        _lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        _lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
        _lowercase : Any = examples['label']
        return result
    with training_args.main_process_first(desc='dataset map pre-processing' ):
        _lowercase : str = raw_datasets.map(
            lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        _lowercase : Any = raw_datasets['train']
        if data_args.max_train_samples is not None:
            _lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        _lowercase : str = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            _lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        _lowercase : Optional[int] = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            _lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowerCamelCase_ ):
        _lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
        _lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        _lowercase : Any = default_data_collator
    elif training_args.fpaa:
        # fp16: pad to a multiple of 8 for tensor-core efficiency.
        _lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
    else:
        _lowercase : Optional[Any] = None
    # Initialize our Trainer
    _lowercase : List[str] = Trainer(
        model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
    # Training
    if training_args.do_train:
        _lowercase : Optional[int] = None
        if training_args.resume_from_checkpoint is not None:
            _lowercase : List[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            _lowercase : Optional[Any] = last_checkpoint
        _lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
        _lowercase : List[Any] = train_result.metrics
        _lowercase : Dict = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
        )
        _lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , lowerCamelCase_ )
        trainer.save_metrics('train' , lowerCamelCase_ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        _lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
        _lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
        _lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
        trainer.log_metrics('eval' , lowerCamelCase_ )
        trainer.save_metrics('eval' , lowerCamelCase_ )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        _lowercase : Any = predict_dataset.remove_columns('label' )
        _lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
        _lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
        _lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(lowerCamelCase_ , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(lowerCamelCase_ ):
                    _lowercase : List[str] = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
    _lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCamelCase_ )
    else:
        trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
    """Entry point for TPU training via `xla_spawn` (receives the process index).

    NOTE(review): this def reuses the name ``UpperCamelCase_``, shadowing the
    training entry point defined above, and ``main`` is not defined anywhere in
    this file -- both calls below would raise NameError. Presumably mangled
    from upstream ``_mp_fn(index)`` / ``main()``.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 89 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
# Metric metadata strings. NOTE(review): all three are bound to the same name
# `a` (presumably mangled from _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION),
# so each assignment overwrites the previous one.
# BibTeX citations for the SARI and SacreBLEU papers:
a ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
# Short description of the combined WIKI_SPLIT metric:
a ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
# Usage / argument documentation shown by `datasets.load_metric`:
a ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """Lower-case the text and remove punctuation, English articles and extra
    whitespace (SQuAD-style answer normalization).

    Fixes over the mangled original: ``remove_articles`` compiled the article
    regex into an unused local and then called ``re.sub`` with the undefined
    name ``lowerCamelCase_`` for both pattern and string.
    """
    def remove_articles(text ):
        # \b keeps words such as "another" or "theory" intact.
        regex = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
        return re.sub(regex , ' ' , text )
    def white_space_fix(text ):
        # Collapse any whitespace run to a single space and trim the ends.
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def SCREAMING_SNAKE_CASE__ ( a_gold , a_pred ) -> int:
    """Return 1 if the two answers are equal after normalization, else 0.

    Fixes over the mangled original: the signature repeated the parameter name
    ``lowerCamelCase__`` (a SyntaxError) and the body compared the undefined
    name ``lowerCamelCase_`` against itself.
    """
    # NOTE(review): `normalize_answer` is expected to be the normalization
    # helper defined just above, whose def name was mangled to
    # SCREAMING_SNAKE_CASE__ -- the call is unresolved as-is.
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions, references):
    """Corpus-level exact-match score in [0, 100].

    A prediction counts as a match if it equals (after normalization) ANY of
    its reference sentences.

    Args:
        predictions: list of predicted sentences.
        references: list of lists of reference sentences, parallel to
            `predictions`.

    Returns:
        Percentage of predictions with at least one exact reference match.
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : Tuple = [rgram for rgrams in rgramslist for rgram in rgrams]
__lowerCamelCase : Any = Counter(lowerCamelCase_ )
__lowerCamelCase : Optional[Any] = Counter(lowerCamelCase_ )
__lowerCamelCase : Optional[Any] = Counter()
for sgram, scount in sgramcounter.items():
__lowerCamelCase : Optional[Any] = scount * numref
__lowerCamelCase : Union[str, Any] = Counter(lowerCamelCase_ )
__lowerCamelCase : List[Any] = Counter()
for cgram, ccount in cgramcounter.items():
__lowerCamelCase : List[str] = ccount * numref
# KEEP
__lowerCamelCase : Tuple = sgramcounter_rep & cgramcounter_rep
__lowerCamelCase : int = keepgramcounter_rep & rgramcounter
__lowerCamelCase : List[Any] = sgramcounter_rep & rgramcounter
__lowerCamelCase : str = 0
__lowerCamelCase : int = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : Tuple = 1
if len(lowerCamelCase_ ) > 0:
__lowerCamelCase : Optional[int] = keeptmpscorea / len(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__lowerCamelCase : Optional[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__lowerCamelCase : str = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__lowerCamelCase : Optional[int] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__lowerCamelCase : Optional[int] = sgramcounter_rep - cgramcounter_rep
__lowerCamelCase : Optional[int] = delgramcounter_rep - rgramcounter
__lowerCamelCase : Dict = sgramcounter_rep - rgramcounter
__lowerCamelCase : List[str] = 0
__lowerCamelCase : Optional[int] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowerCamelCase : int = 1
if len(lowerCamelCase_ ) > 0:
__lowerCamelCase : Tuple = deltmpscorea / len(lowerCamelCase_ )
# ADDITION
__lowerCamelCase : Any = set(lowerCamelCase_ ) - set(lowerCamelCase_ )
__lowerCamelCase : Optional[Any] = set(lowerCamelCase_ ) & set(lowerCamelCase_ )
__lowerCamelCase : int = set(lowerCamelCase_ ) - set(lowerCamelCase_ )
__lowerCamelCase : int = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : Tuple = 1
if len(lowerCamelCase_ ) > 0:
__lowerCamelCase : Optional[int] = addtmpscore / len(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
__lowerCamelCase : Any = addtmpscore / len(lowerCamelCase_ )
__lowerCamelCase : Optional[int] = 0
if addscore_precision > 0 or addscore_recall > 0:
__lowerCamelCase : Union[str, Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Compute the sentence-level SARI score.

    Builds 1- to 4-grams for the source sentence ``ssent``, the candidate
    sentence ``csent`` and every reference in ``rsents``, scores each n-gram
    order with ``SARIngram`` and averages the keep/delete/add components.

    Args:
        ssent: source sentence (whitespace-tokenized string).
        csent: candidate/predicted sentence.
        rsents: list of reference sentences.

    Returns:
        The final SARI score as a float in [0, 1].
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    s3grams = []
    s4grams = []
    c2grams = []
    c3grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2grams.append(r1grams[i] + " " + r1grams[i + 1])
            if i < len(r1grams) - 2:
                r3grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2])
            if i < len(r1grams) - 3:
                r4grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3])
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2grams.append(s1grams[i] + " " + s1grams[i + 1])
        if i < len(s1grams) - 2:
            s3grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2])
        if i < len(s1grams) - 3:
            s4grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3])

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2grams.append(c1grams[i] + " " + c1grams[i + 1])
        if i < len(c1grams) - 2:
            c3grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2])
        if i < len(c1grams) - 3:
            c4grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3])

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """Tokenize/normalize a sentence so it can be split on spaces.

    Args:
        sentence: input sentence.
        lowercase: whether to lowercase before tokenizing.
        tokenizer: one of "13a", "intl" (sacrebleu tokenizers), "moses",
            "penn" (sacremoses), or anything else to skip tokenization.
        return_str: return a string if True, a token list if False.

    Returns:
        The normalized sentence as a string (or list of tokens).
    """
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu moved its tokenizers in v2; support both layouts.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI score in [0, 100].

    Args:
        sources: list of source sentences.
        predictions: list of predicted sentences, parallel to `sources`.
        references: list of lists of reference sentences.

    Raises:
        ValueError: if the three lists do not have the same length.

    Returns:
        Average sentence-level SARI, scaled to a percentage.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('Sources length must match predictions and references lengths.')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus-level BLEU via sacrebleu.

    Args:
        predictions: list of predicted sentences.
        references: list of lists of reference sentences; every prediction
            must have the same number of references.
        smooth_method / smooth_value / force / lowercase / use_effective_order:
            forwarded to ``sacrebleu.corpus_bleu``.

    Raises:
        ValueError: if the reference counts are not uniform.

    Returns:
        The sacrebleu score (float).
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('Sacrebleu requires the same number of references for each prediction')
    # sacrebleu expects references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_(datasets.Metric):
    """Metric combining SARI, sacreBLEU and exact match for text simplification."""

    def _info(self):
        """Describe the metric inputs: one prediction string and a sequence of references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=[
                'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
                'https://github.com/cocoxu/simplification/blob/master/SARI.py',
                'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
                'https://github.com/mjpost/sacreBLEU',
            ],
            reference_urls=[
                'https://www.aclweb.org/anthology/Q16-1029.pdf',
                'https://github.com/mjpost/sacreBLEU',
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return {'sari', 'sacrebleu', 'exact'} computed over the whole corpus."""
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
| 652 |
from maths.prime_factors import prime_factors
def UpperCamelCase_(number: int) -> int:
    """Liouville lambda function: -1 if `number` has an odd count of prime
    factors (with multiplicity), +1 if the count is even.

    Args:
        number: a positive integer.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 89 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCAmelCase : Any = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 token-id array of the given shape.

    Args:
        shape: tuple of dimensions for the output array.
        vocab_size: ids are drawn uniformly from [0, vocab_size - 1].
        rng: optional `random.Random`; a fresh one is created if omitted.

    Returns:
        A numpy array of dtype int32 with the requested shape.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    """Create a random 0/1 attention mask of the given shape.

    The last position of every row is forced to 1 so that at least one token
    is attended to for each batch entry.
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class __magic_name__ :
    """Mixin with `generate()` tests shared by Flax model test suites.

    NOTE(review): this block looks mangled by an automated rename — every
    method declares only `self` yet the bodies reference an undefined name
    `snake_case`, the same local `A_` is re-bound for each intermediate
    value, and the two class attributes below share one name so the second
    binding clobbers the first. The comments describe the apparent intent;
    confirm against the original `FlaxGenerationTesterMixin` before relying
    on them.
    """

    # Presumably `model_tester = None` and `all_generative_model_classes = ()`
    # in the original; here the second binding overwrites the first.
    __UpperCamelCase = None
    __UpperCamelCase = ()

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        """Build (config, input_ids, attention_mask, max_length) shared by the tests below."""
        A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        A_ : List[str] = 2
        A_ : int = inputs['input_ids'].shape[-1] // 2
        A_ : str = inputs['input_ids'][:max_batch_size, :sequence_length]
        A_ : int = jnp.ones_like(snake_case )
        A_ : int = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        A_ : Union[str, Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            A_ : List[str] = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def SCREAMING_SNAKE_CASE ( self :Dict ):
        """Greedy generation must agree between the Flax model and its PyTorch twin."""
        A_ : Optional[Any] = self._get_input_ids_and_config()
        A_ : List[str] = False
        A_ : List[str] = max_length
        A_ : Optional[Any] = 0
        for model_class in self.all_generative_model_classes:
            A_ : Optional[Any] = model_class(snake_case )
            A_ : List[str] = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            A_ : Tuple = getattr(snake_case , snake_case )
            A_ : Dict = pt_model_class(snake_case ).eval()
            A_ : Tuple = load_flax_weights_in_pytorch_model(snake_case , flax_model.params )
            A_ : Optional[Any] = flax_model.generate(snake_case ).sequences
            A_ : Optional[Any] = pt_model.generate(torch.tensor(snake_case , dtype=torch.long ) )
            # PT may stop earlier; compare only the overlapping prefix.
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                A_ : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
        """Greedy generation: jitted and non-jitted outputs must match."""
        A_ : Union[str, Any] = self._get_input_ids_and_config()
        A_ : Union[str, Any] = False
        A_ : Tuple = max_length
        for model_class in self.all_generative_model_classes:
            A_ : Union[str, Any] = model_class(snake_case )
            A_ : str = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : Tuple = jit(model.generate )
            A_ : Union[str, Any] = jit_generate(snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        """Sampling generation: jitted and non-jitted outputs must match."""
        A_ : Optional[int] = self._get_input_ids_and_config()
        A_ : Any = True
        A_ : Union[str, Any] = max_length
        for model_class in self.all_generative_model_classes:
            A_ : Any = model_class(snake_case )
            A_ : int = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : str = jit(model.generate )
            A_ : str = jit_generate(snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        """Beam search (apparently num_beams=2): jitted and non-jitted outputs must match."""
        A_ : int = self._get_input_ids_and_config()
        A_ : List[str] = False
        A_ : Optional[Any] = max_length
        A_ : str = 2
        for model_class in self.all_generative_model_classes:
            A_ : List[str] = model_class(snake_case )
            A_ : Dict = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : Optional[Any] = jit(model.generate )
            A_ : List[str] = jit_generate(snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :str ):
        """Beam search with multiple return sequences: batch dim scales with num_return_sequences."""
        A_ : List[str] = self._get_input_ids_and_config()
        A_ : Dict = False
        A_ : List[Any] = max_length
        A_ : int = 2
        A_ : Any = 2
        for model_class in self.all_generative_model_classes:
            A_ : Union[str, Any] = model_class(snake_case )
            A_ : Dict = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )

    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        """Sampling with temperature/top_k/top_p and forced bos/eos tokens; jit parity."""
        A_ : Optional[int] = self._get_input_ids_and_config()
        A_ : List[Any] = True
        A_ : Optional[Any] = max_length
        A_ : int = 0.8
        A_ : Optional[Any] = 10
        A_ : Dict = 0.3
        A_ : Union[str, Any] = 1
        A_ : Union[str, Any] = 8
        A_ : Optional[Any] = 9
        for model_class in self.all_generative_model_classes:
            A_ : Any = model_class(snake_case )
            A_ : str = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : List[str] = jit(model.generate )
            A_ : List[Any] = jit_generate(snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        """Greedy generation with min_length and forced bos/eos tokens; jit parity."""
        A_ : List[str] = self._get_input_ids_and_config()
        A_ : List[str] = max_length
        A_ : List[Any] = 1
        A_ : Optional[Any] = 8
        A_ : List[Any] = 9
        for model_class in self.all_generative_model_classes:
            A_ : List[str] = model_class(snake_case )
            A_ : List[str] = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : Dict = jit(model.generate )
            A_ : List[Any] = jit_generate(snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :int ):
        """Beam search with min_length and forced bos/eos tokens; jit parity."""
        A_ : Union[str, Any] = self._get_input_ids_and_config()
        A_ : Union[str, Any] = max_length
        A_ : Optional[int] = 2
        A_ : Dict = 1
        A_ : int = 8
        A_ : Optional[int] = 9
        for model_class in self.all_generative_model_classes:
            A_ : Any = model_class(snake_case )
            A_ : int = model.generate(snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : Any = jit(model.generate )
            A_ : str = jit_generate(snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        """Greedy generation with a left-padded attention mask; jit parity."""
        A_ : Tuple = self._get_input_ids_and_config()
        # pad attention mask on the left
        A_ : Any = attention_mask.at[(0, 0)].set(0 )
        A_ : Union[str, Any] = False
        A_ : Any = max_length
        for model_class in self.all_generative_model_classes:
            A_ : List[str] = model_class(snake_case )
            A_ : Any = model.generate(snake_case , attention_mask=snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : List[str] = jit(model.generate )
            A_ : List[str] = jit_generate(snake_case , attention_mask=snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :str ):
        """Sampling generation with a left-padded attention mask; jit parity."""
        A_ : Optional[int] = self._get_input_ids_and_config()
        # pad attention mask on the left
        A_ : Optional[int] = attention_mask.at[(0, 0)].set(0 )
        A_ : Tuple = True
        A_ : int = max_length
        for model_class in self.all_generative_model_classes:
            A_ : int = model_class(snake_case )
            A_ : Optional[int] = model.generate(snake_case , attention_mask=snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : Optional[Any] = jit(model.generate )
            A_ : Any = jit_generate(snake_case , attention_mask=snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def SCREAMING_SNAKE_CASE ( self :Any ):
        """Beam search with a left-padded attention mask; jit parity."""
        A_ : Union[str, Any] = self._get_input_ids_and_config()
        # pad attention mask on the left
        A_ : List[str] = attention_mask.at[(0, 0)].set(0 )
        A_ : List[Any] = 2
        A_ : List[Any] = max_length
        for model_class in self.all_generative_model_classes:
            A_ : Optional[Any] = model_class(snake_case )
            A_ : Optional[Any] = model.generate(snake_case , attention_mask=snake_case ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case )
            A_ : Tuple = jit(model.generate )
            A_ : int = jit_generate(snake_case , attention_mask=snake_case ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __magic_name__ ( unittest.TestCase ):
    """Checks that `generate` rejects unknown or mistyped keyword arguments.

    NOTE(review): like the mixin above, this body references an undefined
    name `snake_case` where the original presumably used the locals bound
    just before (`input_ids`, the expected exception class, etc.) — confirm
    against the original test file.
    """

    def SCREAMING_SNAKE_CASE ( self :str ):
        A_ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        A_ : Optional[Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        A_ : Optional[int] = 'Hello world'
        A_ : List[Any] = tokenizer(snake_case , return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(snake_case , "do_samples" ):
            model.generate(snake_case , do_samples=snake_case )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(snake_case , "foo" ):
            A_ : Dict = {'foo': 'bar'}
            model.generate(snake_case , **snake_case )
| 454 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, *, transpose and the
    Sherman-Morrison rank-one inverse update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Human-readable rendering with right-aligned, equal-width cells."""
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff `loc` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        """Scalar or matrix multiplication depending on the operand type."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another)})'''
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1) given that `self` is A^(-1).

        `u` and `v` must be column vectors with as many rows as this matrix.
        Returns None when A + u v^T is not invertible (1 + v^T A^(-1) u == 0).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
# Testing
if __name__ == "__main__":

    def testa() -> None:
        """Demonstrate the Sherman-Morrison update on the 3x3 identity."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''')
        print(f'''v is {v}''')
        print(f'''uv^T is {u * v.transpose()}''')
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''')

    def test_doctest() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    testa()
| 89 | 0 |
"""Lazy-import package init exposing `Wav2Vec2ProcessorWithLM`."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps submodule name -> names it exports; consumed by _LazyModule below.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodule is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (in seconds) to (h):mm:ss; hours are omitted when zero."""
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar(value, total, prefix, label, width=300):
    """Return the HTML snippet for a progress bar at `value`/`total`.

    `prefix` is rendered before the bar, `label` after it, and `width` is the
    bar width in pixels.
    """
    # docstyle-ignore
    return f'''
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    '''
def text_to_html_table(items):
    """Render `items` (first row = headers, rest = data rows) as an HTML table.

    Float cells are formatted with six decimal places; everything else is
    stringified as-is.
    """
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''    <th>{i}</th>\n'''
    html_code += "  </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += "  <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt, float) else str(elt)
            html_code += f'''    <td>{elt}</td>\n'''
        html_code += "  </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """A progress bar rendered as HTML in a Jupyter notebook via IPython.display.

    The bar throttles itself: after `warmup` unconditional refreshes it only
    redraws roughly every `update_every` seconds, based on the measured
    average time per item.
    """

    # Number of initial updates that always trigger a redraw.
    warmup = 5
    # Target interval (in seconds) between redraws after warmup.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        """
        Args:
            total: expected number of items.
            prefix: text shown before the bar (defaults to empty).
            leave: whether to keep the bar displayed once finished.
            parent: optional parent tracker that owns the actual display.
            width: bar width in pixels.
        """
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        """Advance the bar to `value`, redrawing only when warranted.

        Args:
            value: current progress count.
            force_update: redraw even if the throttling would skip it.
            comment: optional text appended to the bar label.
        """
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and draw immediately.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twixe with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            # Recompute how many items to wait before the next redraw.
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild the textual label for `value` and redraw the bar."""
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            self.label = (
                f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                f''' {format_time(self.predicted_remaining)}'''
            )
            self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f''', {self.comment}]'''
        self.display()

    def display(self):
        """Render the HTML, delegating to the parent tracker when present."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the displayed bar (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class NotebookTrainingTracker(NotebookProgressBar):
    """A progress bar plus a metrics table, with an optional child bar
    (e.g. the evaluation bar nested under the training bar)."""

    def __init__(self, num_steps, column_names=None):
        """
        Args:
            num_steps: total number of steps for the main bar.
            column_names: optional list of initial table column headers.
        """
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """Render the bar, the metrics table and any child bar as one HTML blob."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append one row (dict of column -> value) to the metrics table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Create (and return) a nested progress bar displayed below this one."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Detach the child bar and redraw without it."""
        self.child_bar = None
        self.display()
class NotebookProgressCallback( _a ):
    """
    Trainer callback that displays training/evaluation progress in a Jupyter notebook.

    Fixes vs. the previous version: every hook had duplicate parameter names (a
    SyntaxError), instance state was assigned to locals instead of ``self`` attributes,
    the ``values`` row dicts were never built, and several undefined names
    (``epoch``, ``columns``, ``force_update=lowerCamelCase``) are restored.
    Hook names follow the TrainerCallback protocol so the trainer actually calls them.
    """

    def __init__(self):
        self.training_tracker = None  # NotebookTrainingTracker for the whole run
        self.prediction_bar = None  # child bar shown during evaluation/prediction
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        """Create the tracker with the right table columns for the eval strategy."""
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        """Advance the main bar by one optimization step."""
        # Show the epoch as an int when it is exactly integral, else with 2 decimals.
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=f'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        """Advance (or lazily create) the prediction bar, one batch at a time."""
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        """Close and forget the prediction bar once prediction finishes."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        """Write the training loss into the table when there is no evaluation."""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step sine we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """Write a full table row (losses + cleaned-up metrics) after an evaluation."""
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(R'\_loss$', '', k)
            # Drop bookkeeping entries that should not appear in the table.
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(f'''{metric_key_prefix}_runtime''', None)
            _ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''', None)
            _ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''', None)
            _ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''', None)
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    values['Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        """Do a final forced update of the bar, then release the tracker."""
        self.training_tracker.update(
            state.global_step, comment=f'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=True)
        self.training_tracker = None
| 89 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
class UpperCAmelCase ( _a ):
    '''
    Image processor: shortest-edge resize, center crop, rescale and RGB->BGR channel flip,
    as used by MobileViT-style checkpoints.

    Fixes vs. the previous version: every signature repeated the parameter name ``A``
    (a SyntaxError) and several keyword values were garbled; real parameter names and
    the standard processor method names (``resize``/``preprocess``/...) are restored so
    the ``BaseImageProcessor`` machinery can call them.
    '''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 2_56, 'width': 2_56}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Flip RGB <-> BGR channel order."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a batch of images, falling back to instance defaults for unset options."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Turn model logits into one semantic-segmentation map per image.

        When ``target_sizes`` is given, each logits map is bilinearly resized to the
        matching target size before the argmax.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 55 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig for the given checkpoint name (e.g. "focalnet-tiny-lrf").

    Fixes vs. the previous version: the parameter is named ``model_name`` (the body
    referenced it but the signature didn't define it), the id2label dict comprehension
    converts ``k`` (not the function argument), and the config kwargs use the real
    ``id2label``/``label2id`` names. Renamed to ``get_focalnet_config`` — the
    conversion function below calls it by that name.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """Map an original FocalNet state-dict key to its HuggingFace equivalent.

    Fixes vs. the previous version: the body referenced ``name`` but the parameter
    was called ``lowerCamelCase_`` (a NameError on every call), and the function is
    renamed to ``rename_key`` so the conversion loop can call it.
    """
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers', 'encoder.stages')
    if "downsample.proj" in name:
        name = name.replace('downsample.proj', 'downsample.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f', 'modulation.projection_in')
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h', 'modulation.projection_context')
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj', 'modulation.projection_out')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        # classification head keeps its top-level position
        name = name.replace('head', 'classifier')
    else:
        # everything else lives under the `focalnet.` backbone prefix
        name = 'focalnet.' + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert it to HF format and verify it.

    Fixes vs. the previous version: the function is named
    ``convert_focalnet_checkpoint`` (the ``__main__`` block calls it by that name),
    the rename loop actually re-inserts the popped values under the renamed keys,
    and all previously-undefined locals (``checkpoint_url``, ``state_dict``,
    ``config``, ``model``, ``image``, ``expected_slice``, ...) are restored.
    NOTE(review): performs network downloads; not unit-testable offline.
    """
    # fmt: off
    model_name_to_url = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25]),
        ])

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.id2label[predicted_class_idx])

    print('First values of logits:', outputs.logits[0, :3])

    # expected logits slices from the original repo, per model size
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.16_69, 0.01_25, -0.16_95])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.49_17, -0.04_30, 0.13_41])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.25_88, -0.53_42, -0.23_31])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.16_55, -0.40_90, -0.17_30])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.53_06, -0.04_83, -0.39_28])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''')
        model.push_to_hub(F'''{model_name}''')
        processor.push_to_hub(F'''{model_name}''')
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # Fixes: the parser and parsed args were previously bound to `SCREAMING_SNAKE_CASE`,
    # leaving `parser` and `args` undefined on the following lines.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCamelCase : str ={
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class UpperCAmelCase__ ( _a ):
    """Configuration class for XGLM models.

    Fixes vs. the previous version: ``__init__`` repeated the parameter name ``A__``
    for every argument (a SyntaxError), and the three class attributes were all named
    ``__snake_case`` so they clobbered each other — the config machinery needs them
    under ``model_type``/``keys_to_ignore_at_inference``/``attribute_map``.
    """

    model_type = """xglm"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 206 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
    """Configuration class for DETA models.

    Fixes vs. the previous version: ``__init__`` repeated ``lowerCamelCase`` for all
    37 parameters (a SyntaxError), every instance attribute was assigned to a local
    ``_lowercase`` instead of ``self``, and the class attributes carry their real
    names (``model_type``/``attribute_map``) so the config machinery works.
    """

    model_type = """deta"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=9_00,
        max_position_embeddings=20_48,
        encoder_layers=6,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=10_24,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=3_00,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.2_5,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                # re-hydrate a plain dict into the proper backbone config class
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias mandated by `attribute_map`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias mandated by `attribute_map`."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 89 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = "▁"
_snake_case = {"vocab_file": "spiece.model"}
_snake_case = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
_snake_case = {
"google/reformer-crime-and-punishment": 5_2_4_2_8_8,
}
class _SCREAMING_SNAKE_CASE ( _a ):
    '''
    SentencePiece-based tokenizer (Reformer crime-and-punishment checkpoint).

    Fixes vs. the previous version: ``__init__`` repeated the parameter name
    ``UpperCAmelCase_`` (a SyntaxError), every method shared one obfuscated name so
    only the last survived, the class attributes carry their real names required by
    ``PreTrainedTokenizer``, and garbled expressions (``out_type=str``,
    ``self.__dict__ = d``, the decode of ``current_sub_tokens``) are restored.
    '''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],  # NOTE(review): mutable default kept for upstream parity; never mutated here
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The C++ sp_model object is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a string, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 580 |
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Element-wise rectified linear unit: max(0, x) for each entry.

    Fixes: the `__main__` block calls `relu`, but the function was named
    `UpperCamelCase_` (NameError); the return annotation was also wrong —
    `np.maximum` returns an ndarray, not an int.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 89 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """How the text-generation pipeline should return its results.

    Fixes: all three members previously shared one name (enum forbids reuse ->
    TypeError at class creation), and the class is named ``ReturnType`` — the
    pipeline below references ``ReturnType.TENSORS/NEW_TEXT/FULL_TEXT``.
    """

    TENSORS = 0  # raw generated token ids
    NEW_TEXT = 1  # only the newly generated text
    FULL_TEXT = 2  # prompt + generated text
@add_end_docstrings(_a )
class lowerCamelCase__ ( _a ):
__lowerCamelCase = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self : List[Any] , *__a : int , **__a : List[Any] ):
'''simple docstring'''
super().__init__(*__a , **__a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCamelCase__: int = None
if self.model.config.prefix is not None:
lowerCamelCase__: str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCamelCase__: str = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCamelCase__: Optional[Any] = self._sanitize_parameters(prefix=__a , **self._forward_params )
lowerCamelCase__: Dict = {**self._preprocess_params, **preprocess_params}
lowerCamelCase__: str = {**self._forward_params, **forward_params}
def lowerCamelCase_ ( self : str , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : Optional[int]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : List[Any]=None , __a : List[str]=None , **__a : Any , ):
'''simple docstring'''
lowerCamelCase__: List[str] = {}
if prefix is not None:
lowerCamelCase__: str = prefix
if prefix:
lowerCamelCase__: Dict = self.tokenizer(
__a , padding=__a , add_special_tokens=__a , return_tensors=self.framework )
lowerCamelCase__: Any = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
""" [None, \'hole\']""" )
lowerCamelCase__: str = handle_long_generation
preprocess_params.update(__a )
lowerCamelCase__: Optional[int] = generate_kwargs
lowerCamelCase__: Union[str, Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
lowerCamelCase__: Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
lowerCamelCase__: List[str] = ReturnType.TENSORS
if return_type is not None:
lowerCamelCase__: Tuple = return_type
if clean_up_tokenization_spaces is not None:
lowerCamelCase__: Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCamelCase__: List[Any] = self.tokenizer.encode(__a , add_special_tokens=__a )
if len(__a ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCamelCase__: Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase_(self, *args, **kwargs):
    """Forward to the parent tokenization helper, adding the space-before-punctuation
    flag that Transfo-XL tokenization requires.

    Fix: the original signature was `*__a, **__a` — a duplicate argument name,
    which is a SyntaxError in Python.
    """
    if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
        kwargs.update({"add_space_before_punct_symbol": True})
    return super()._parse_and_tokenize(*args, **kwargs)
def __call__(self, text_inputs, **kwargs):
    """Generate text continuations for `text_inputs` (delegates to the pipeline base).

    Fix: the original declared both the positional and the keyword-splat
    parameter as `__a` — a duplicate argument name, which is a SyntaxError.
    """
    return super().__call__(text_inputs, **kwargs)
def lowerCamelCase_(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
    """Tokenize the prompt (with optional prefix) into model inputs.

    NOTE(review): reconstructed from the upstream TextGenerationPipeline
    `preprocess`; the original block declared four parameters all named `__a`
    (a SyntaxError) and never bound `inputs`/`cur_len` which it then read.
    """
    inputs = self.tokenizer(
        prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
    )
    inputs["prompt_text"] = prompt_text

    if handle_long_generation == "hole":
        # "hole" mode: keep only the trailing context that still leaves room
        # for the requested number of new tokens.
        cur_len = inputs["input_ids"].shape[-1]
        if "max_new_tokens" in generate_kwargs:
            new_tokens = generate_kwargs["max_new_tokens"]
        else:
            new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
            if new_tokens < 0:
                raise ValueError("We cannot infer how many new tokens are expected")
        if cur_len + new_tokens > self.tokenizer.model_max_length:
            keep_length = self.tokenizer.model_max_length - new_tokens
            if keep_length <= 0:
                raise ValueError(
                    "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                    " models max length"
                )
            inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
            if "attention_mask" in inputs:
                inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
    return inputs
def lowerCamelCase_(self, model_inputs, **generate_kwargs):
    """Run generation and reshape the output to (batch, num_return_sequences, ...).

    NOTE(review): reconstructed from the upstream TextGenerationPipeline
    `_forward`; the original block declared `__a, **__a` (duplicate argument —
    a SyntaxError) and never bound `in_b`/`out_b` which it then read.
    """
    input_ids = model_inputs["input_ids"]
    attention_mask = model_inputs.get("attention_mask", None)
    # Allow empty prompts
    if input_ids.shape[1] == 0:
        input_ids = None
        attention_mask = None
        in_b = 1
    else:
        in_b = input_ids.shape[0]
    prompt_text = model_inputs.pop("prompt_text")

    # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
    # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
    prefix_length = generate_kwargs.pop("prefix_length", 0)
    if prefix_length > 0:
        has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
            "generation_config" in generate_kwargs
            and generate_kwargs["generation_config"].max_new_tokens is not None
        )
        if not has_max_new_tokens:
            generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
            generate_kwargs["max_length"] += prefix_length
        has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
            "generation_config" in generate_kwargs
            and generate_kwargs["generation_config"].min_new_tokens is not None
        )
        if not has_min_new_tokens and "min_length" in generate_kwargs:
            generate_kwargs["min_length"] += prefix_length

    # BS x SL
    generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
    out_b = generated_sequence.shape[0]
    if self.framework == "pt":
        generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
    elif self.framework == "tf":
        generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
    return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCamelCase_(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
    """Decode generated sequences into text (or raw token ids) records.

    NOTE(review): reconstructed from the upstream TextGenerationPipeline
    `postprocess`; the original block declared three parameters all named
    `__a` (a SyntaxError) and never bound `records`/`text`/`record`.
    """
    generated_sequence = model_outputs["generated_sequence"][0]
    input_ids = model_outputs["input_ids"]
    prompt_text = model_outputs["prompt_text"]
    generated_sequence = generated_sequence.numpy().tolist()
    records = []
    for sequence in generated_sequence:
        if return_type == ReturnType.TENSORS:
            record = {"generated_token_ids": sequence}
        elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
            # Decode text
            text = self.tokenizer.decode(
                sequence,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            )
            # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
            if input_ids is None:
                prompt_length = 0
            else:
                prompt_length = len(
                    self.tokenizer.decode(
                        input_ids[0],
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                )
            if return_type == ReturnType.FULL_TEXT:
                all_text = prompt_text + text[prompt_length:]
            else:
                all_text = text[prompt_length:]
            record = {"generated_text": all_text}
        records.append(record)
    return records
| 306 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model directory.

    Fix: the original declared all three parameters as `lowerCamelCase_`
    (duplicate argument — a SyntaxError) and never bound `config`/`model`,
    which the body then read.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point for the TF->PyTorch T5 conversion.
    # Fix: the original assigned the parser/args to `SCREAMING_SNAKE_CASE` but
    # read `parser`/`args`, and called the undefined name
    # `convert_tf_checkpoint_to_pytorch` (the function above is `UpperCamelCase_`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    UpperCamelCase_(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 0 |
lowerCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 496 |
def UpperCamelCase_(lowerCamelCase_) -> int:
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Non-integer or negative input returns 0 (legacy sentinel kept for
    backward compatibility).

    Fix: the original returned 0 for n == 1, but the iterative branch it
    guards computes F(1) = 1; it also built the whole sequence in a list when
    only two running values are needed.
    """
    if not isinstance(lowerCamelCase_, int) or lowerCamelCase_ < 0:
        return 0
    if lowerCamelCase_ < 2:
        return lowerCamelCase_
    prev, curr = 0, 1
    for _ in range(2, lowerCamelCase_ + 1):
        prev, curr = curr, prev + curr
    return curr
def UpperCamelCase_(lowerCamelCase_) -> int:
    """Return the first Fibonacci index (>= 3) whose value has at least
    `lowerCamelCase_` decimal digits.

    Fixes: the original called the undefined name `fibonacci` (the helper
    above was renamed), and recomputed the whole sequence from scratch on
    every loop iteration (quadratic work). The Fibonacci pair is now advanced
    incrementally; return values are unchanged.
    """
    index = 2
    digits = 0
    prev, curr = 1, 1  # F(index - 1), F(index)
    while digits < lowerCamelCase_:
        index += 1
        prev, curr = curr, prev + curr
        digits = len(str(curr))
    return index
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
    # Project Euler 25 entry point: index of the first Fibonacci number with
    # `lowerCamelCase_` digits.
    # NOTE(review): `fibonacci_digits_index` is not defined under that name in
    # this file (the helper above was renamed to `UpperCamelCase_`), so this
    # call raises NameError as written — confirm the intended helper name.
    return fibonacci_digits_index(lowerCamelCase_)
if __name__ == "__main__":
    # Fix: `solution` is undefined in this file; at module-execution time the
    # last `UpperCamelCase_` definition above is the solution entry point.
    print(UpperCamelCase_(int(str(input()).strip())))
| 89 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE_(_a):
    """Wav2Vec2 processor: wraps a feature extractor (audio) and a CTC tokenizer (text)
    behind a single interface.

    NOTE(review): reconstructed from the upstream Wav2Vec2Processor. The
    original block assigned both class attributes to the same name
    `lowercase`, named every method `SCREAMING_SNAKE_CASE_` (so they shadowed
    each other), and never set `self.current_processor` /
    `self._in_target_context_manager`, which the methods read. The processor
    mixin base class contract requires the attribute/method names restored
    here — verify against the base class.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Processor used by __call__/pad; swapped to the tokenizer inside
        # the (deprecated) as_target_processor context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor; fall back to loading the two components directly
        when the saved config lacks a `tokenizer_class` (deprecated layout)."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f'Loading a tokenizer inside {cls.__name__} from a config that does not'
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ',
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Process `audio` with the feature extractor and/or `text` with the
        tokenizer; text encodings are attached as `labels` when both are given."""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and/or `labels` with
        the tokenizer, mirroring __call__'s merge behavior."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route __call__/pad to the tokenizer for
        label processing."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: maps submodule name -> list of public names it provides.
# Fix: the original assigned this dict (and every later entry) to the single
# variable `SCREAMING_SNAKE_CASE`, overwriting it each time, while the
# _LazyModule call below referenced the undefined name `_import_structure`.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Fix: the original assigned every one of these module-level values to the
# single name `__lowerCamelCase`, so PATH_TO_TRANSFORMERS, CONFIG_MAPPING,
# _re_checkpoint and the ignore set were all undefined when read below.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (raw string avoids invalid-escape warnings; the pattern is unchanged)
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_():
    """Verify that each config class in CONFIG_MAPPING documents at least one
    checkpoint whose markdown link matches its name; raise ValueError listing
    the offenders.

    Fix: the original bound every intermediate value to `UpperCAmelCase`,
    leaving `checkpoint_found`, `ckpt_name`, `ckpt_link`, `name` and `message`
    undefined where they were read.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for ckpt_name, ckpt_link in checkpoints:
            # Each checkpoint is (name, link), e.g.
            # ('bert-base-uncased', 'https://huggingface.co/bert-base-uncased').
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
    # Fix: the original called the undefined name
    # `check_config_docstrings_have_checkpoints`; the checker above was
    # renamed to `lowerCamelCase_` in this file.
    lowerCamelCase_()
| 323 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# Fix: the original assigned both physical constants to the throwaway name
# `SCREAMING_SNAKE_CASE`, so REDUCED_PLANCK_CONSTANT and SPEED_OF_LIGHT were
# undefined inside the function below.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def UpperCamelCase_(force, area, distance) -> dict[str, float]:
    """Solve the Casimir-force relation F = (ℏ c π² A) / (240 d⁴) for whichever
    of force/area/distance is passed as 0, returning {"name": value} for the
    solved quantity.

    Fix: the original declared all three parameters as `lowerCamelCase_`
    (duplicate argument — a SyntaxError) while the body read
    `force`/`area`/`distance`.

    Raises ValueError unless exactly one argument is 0, or if any argument is
    negative.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
# Run doctest
if __name__ == "__main__":
    # Executes any doctest examples in this module's docstrings when the
    # file is run as a script.
    import doctest

    doctest.testmod()
| 89 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCamelCase__(lowercase):
    """Decorator: before calling `method`, trigger the accelerate offload hook's
    `pre_forward` (which moves offloaded weights onto the device) when the
    module carries an `_hf_hook`. Returns the method unchanged when accelerate
    is missing or older than 0.17.0.

    Fix: the original body read the undefined names `method`, `accelerate_version`
    (its locals were bound to throwaway names) and forwarded the undefined
    `*lowerCamelCase_` instead of the wrapper's own arguments.
    """
    method = lowercase
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 62 |
def UpperCamelCase_(lowerCamelCase_) -> int:
    """Return the multiplicative persistence of a non-negative integer: the
    number of times its digits must be multiplied together before a single
    digit remains.

    Fixes: the original called `isinstance(x, x)` — which raises TypeError for
    every input because the second argument must be a type — and read the
    undefined name `num` in the negative-value check.

    Raises ValueError for non-integer or negative input.
    """
    if not isinstance(lowerCamelCase_, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if lowerCamelCase_ < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    digit_string = str(lowerCamelCase_)
    while len(digit_string) != 1:
        product = 1
        for ch in digit_string:
            product *= int(ch)
        digit_string = str(product)
        steps += 1
    return steps
def UpperCamelCase_(lowerCamelCase_) -> int:
    """Return the additive persistence of a non-negative integer: the number of
    times its digits must be summed before a single digit remains.

    Fixes: the original called `isinstance(x, x)` — which raises TypeError for
    every input because the second argument must be a type — and read the
    undefined name `num` in the negative-value check.

    Raises ValueError for non-integer or negative input.
    """
    if not isinstance(lowerCamelCase_, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if lowerCamelCase_ < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    digit_string = str(lowerCamelCase_)
    while len(digit_string) != 1:
        digit_string = str(sum(int(ch) for ch in digit_string))
        steps += 1
    return steps
if __name__ == "__main__":
    # Executes any doctest examples in this module's docstrings when the
    # file is run as a script.
    import doctest

    doctest.testmod()
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class A_(TensorFormatter[Mapping, '''torch.Tensor''', Mapping]):
    """Formatter that converts Arrow rows/columns/batches into PyTorch tensors.

    NOTE(review): reconstructed from the upstream datasets TorchFormatter. The
    original block's __init__ declared two parameters with the same name (a
    SyntaxError), named every method `lowerCAmelCase` (so they shadowed each
    other) while call sites referenced `_consolidate`/`_tensorize`/
    `recursive_tensorize`, used nonexistent dtypes `torch.intaa`/`torch.floataa`,
    and recursed on the whole array instead of each `substruct` (infinite
    recursion on object-dtype arrays). The `format_*` method names follow the
    TensorFormatter base-class contract — verify against the base class.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra kwargs forwarded to every torch.tensor(...) call.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor;
        otherwise return the list unchanged."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a torch tensor (strings/bytes/None pass
        through; character arrays become Python lists; PIL images become arrays)."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # Caller-supplied torch_tensor_kwargs override the inferred dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 652 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_(lowerCamelCase_):
    """Build a (DetrConfig, is_panoptic) pair for the given DETR model name.

    NOTE(review): reconstructed from the upstream DETR conversion script. The
    original block bound every value to the throwaway name `_lowercase`
    (`backbone_config`, `config`, `idalabel` were all undefined where read)
    and passed the model-name string as both `use_timm_backbone` and
    `backbone_config`.
    """
    model_name = lowerCamelCase_
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def UpperCamelCase_(lowerCamelCase_):
    """Return the list of (original_name, converted_name) weight-key pairs for
    mapping an original DETR checkpoint onto the HF model layout.

    Fix: the original initialized the accumulator as `_lowercase = []` but
    appended to (and returned) the undefined name `rename_keys` — a NameError
    on every call.
    """
    config = lowerCamelCase_
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
    rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
    rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
    rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
    rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
    # stages
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            # shortcut (only the first layer of a stage downsamples)
            if layer_idx == 0:
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
                    ) )
            # 3 convs
            for i in range(3 ):
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
                    ) )
                rename_keys.append(
                    (
                        F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
                        F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
                    ) )
    # fmt: on
    for i in range(config.encoder_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
                F'''encoder.layers.{i}.self_attn.out_proj.weight''',
            ) )
        rename_keys.append(
            (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
        rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
        rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
        rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
        rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
        rename_keys.append(
            (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append(
            (F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append(
            (F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
                F'''decoder.layers.{i}.self_attn.out_proj.weight''',
            ) )
        rename_keys.append(
            (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
        rename_keys.append(
            (
                F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
                F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
            ) )
        rename_keys.append(
            (
                F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
                F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
            ) )
        rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
        rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
        rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
        rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
        rename_keys.append(
            (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append(
            (F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append(
            (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
        rename_keys.append(
            (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
        rename_keys.append(
            (F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ('input_proj.weight', 'input_projection.weight'),
            ('input_proj.bias', 'input_projection.bias'),
            ('query_embed.weight', 'query_position_embeddings.weight'),
            ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
            ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
            ('class_embed.weight', 'class_labels_classifier.weight'),
            ('class_embed.bias', 'class_labels_classifier.bias'),
            ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
            ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
            ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
            ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
            ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
            ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ] )
    return rename_keys
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    The original version reused one name for all three parameters (a syntax
    error) and discarded the popped value; the rename loop in the conversion
    entry point (which calls ``rename_key``) relies on this pop-and-reassign.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused attention ``in_proj`` weight/bias into q/k/v projections.

    DETR's PyTorch ``MultiHeadAttention`` stores query/key/value as one fused
    768-row matrix and bias; the HF model expects three separate 256-row
    projections. Keys are popped from and written back into ``state_dict``
    in place. The original version assigned every slice to a throwaway local,
    so nothing was ever written back.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO validation image used to verify conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True so PIL can read directly from the response's raw file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original torch-hub DETR weights into the HF DETR
    structure, verify outputs match on a test image, then optionally save
    and/or push the converted model and image processor.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f'''Converting model {model_name}...''')
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                # NOTE(review): remaps "detr.<rest>" -> "detr.model<rest>"; confirm against upstream converter
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    fmt = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=fmt)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f'''nielsr/{model_name}''')
        processor.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
    # Build the CLI; the original bound the parser/args to throwaway names,
    # so `parser.add_argument` and `args.*` raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import table: submodule name -> public names it provides.
# Must be called `_import_structure` — it is passed to _LazyModule below.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# Modeling code is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 454 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
    # Looks like an enumeration of the five Flax scheduler identifiers.
    # NOTE(review): the base class `_a` is not defined anywhere in this file —
    # presumably an Enum base; confirm against the original module.
    # NOTE(review): all five members share the name `lowercase_`, so only the
    # final assignment (5) survives; the member names appear to have been lost.
    lowercase_ : Any = 1
    lowercase_ : Dict = 2
    lowercase_ : Union[str, Any] = 3
    lowercase_ : Tuple = 4
    lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
    # Scheduler step output carrying a single jnp array field.
    # NOTE(review): base `_a` is undefined in this file (likely BaseOutput,
    # imported above) and the field name was lost — confirm before use.
    lowercase_ : jnp.ndarray
class _lowerCamelCase:
    """Mixin with load/save helpers shared by the Flax schedulers.

    Relies on a ConfigMixin-style base providing ``load_config``,
    ``from_config`` and ``save_config``. The original version gave all four
    methods the same name (so only the last survived) and discarded every
    unpacked tuple.
    """

    # File name used by load_config/save_config.
    config_name = SCHEDULER_CONFIG_NAME
    # Constructor arguments excluded from the serialized config.
    ignore_for_config = ["dtype"]
    # Class names of schedulers interchangeable with this one (see L9066 usage).
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a saved configuration."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write this scheduler's configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape) -> jnp.ndarray:
    """Right-pad ``x`` with singleton axes, then broadcast it to ``shape``.

    E.g. a (batch,) vector becomes (batch, 1, 1, ...) and is broadcast against
    sample arrays. Callers below pass the per-timestep scale factors here.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class _lowerCamelCase:
    # Pre-computed diffusion schedule arrays shared by the Flax schedulers.
    # Field names are fixed by the `cls(alphas=..., betas=..., alphas_cumprod=...)`
    # construction below and by `state.alphas_cumprod` reads elsewhere in this file.
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def UpperCamelCase(cls, scheduler):
        """Build the common state from ``scheduler.config``.

        NOTE(review): the scrambled method name is kept to preserve the
        interface; it was likely called ``create`` originally — confirm.
        """
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}'''
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) per timestep,
    broadcast to ``original_samples.shape``.

    ``noise`` is accepted (unused here) so all three schedule helpers below
    share one call signature. The original discarded every intermediate into
    a single throwaway name, leaving ``alphas_cumprod`` undefined.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse: sqrt(a_bar)*x0 + sqrt(1-a_bar)*noise at each timestep.

    Renamed from the scrambled ``UpperCamelCase_`` (which collided with the
    following function, making this one unreachable).
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """v-prediction target: sqrt(a_bar)*noise - sqrt(1-a_bar)*sample.

    Renamed from the scrambled ``UpperCamelCase_`` for the same collision
    reason as the other schedule helpers.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 89 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _a ( unittest.TestCase ):
    """Launches the accelerate test script on 8 TPU cores via xla_spawn.

    The original gave both methods the same name (shadowing) and bound the
    paths to locals instead of ``self`` attributes, so the test body's
    ``self.test_dir``/``self.test_file_path`` reads would fail.
    """

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = F"\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 28 |
from __future__ import annotations
def UpperCamelCase_(nums) -> float:
    """Return the arithmetic mean of ``nums``.

    The original's parameter was scrambled while the body still read ``nums``,
    so every call raised NameError.

    Raises:
        ValueError: if ``nums`` is empty.
    """
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds tiny Funnel configs/inputs and shape-checks each model head.

    Renamed from the scrambled ``UpperCAmelCase``: the test classes below
    instantiate ``TFFunnelModelTester(self)``. The original ``__init__``
    bound everything to a local instead of ``self.*`` and reused one name
    for every parameter (a syntax error); parameter names are restored from
    the body's reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        # NOTE(review): flag names restored from the upstream Funnel tests — confirm.
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Full (encoder+decoder) Funnel model tests.

    The original class shared its name with the base-model test class below
    (so this one was shadowed), gave every test method the same name (so only
    the last was ever discovered by unittest), and lost its mixin base names.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Base (encoder-only) Funnel model tests, using the tester in base mode."""

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True: the tester skips the decoder layer count (original passed
        # an undefined name here).
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 55 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the transformers-cli console script.

    Renamed from the scrambled ``UpperCamelCase_`` — the ``__main__`` guard
    below calls ``main()``. The original bound the parser/args/service to
    throwaway names and passed an undefined name to every register call.
    """
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 89 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCamelCase : Optional[int] =logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) pixel box into the 0-1000 range.

    Renamed from the scrambled ``a__``: the OCR helper below calls
    ``normalize_box``. Parameter names are restored from the body's reads
    (the original reused one name for all three — a syntax error).
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on ``image``; return (words, 0-1000 normalized boxes).

    Renamed from the scrambled ``a__``: the image processor's ``preprocess``
    calls ``apply_tesseract``. The original discarded every intermediate into
    one throwaway local, so the filtering and box math never took effect.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCAmelCase__ ( _a ):
    """Document image processor: resize/rescale/normalize plus optional
    Tesseract OCR producing words and normalized boxes.

    The original ``__init__`` bound every setting to a local instead of
    ``self.*``, and all four methods shared the name ``A__`` (shadowing);
    method names are restored from the ``self.resize``/``self.rescale``/
    ``self.normalize`` calls in ``preprocess``.
    """

    # NOTE(review): attribute name restored per the BaseImageProcessor contract — confirm.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize to ``size['height'] x size['width']`` via the module-level helper."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Channel-wise normalize with ``mean``/``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one or more images; per-call arguments
        override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, '''pytesseract''')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 206 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
    # NOTE(review): this block appears machine-mangled. Every test method below is
    # named `UpperCamelCase`, so each later definition shadows the previous one and
    # unittest/pytest discover none of them (no `test_` prefix). Method bodies also
    # read names such as `generator`, `inputs`, `pipe`, `image_slice`,
    # `expected_slice` whose assignments were rewritten to the throwaway name
    # `_lowercase` — restore the original local names before running. The
    # docstrings below describe the apparent intent only; confirm against the
    # upstream diffusers test suite.

    # Hub checkpoint id for a tiny ONNX Stable Diffusion pipeline.
    # NOTE(review): read later as `self.hub_checkpoint`; presumably this attribute
    # was originally named `hub_checkpoint` — confirm.
    lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    def UpperCamelCase ( self, lowerCamelCase=0) -> str:
        """Build the standard call kwargs (prompt, seeded RNG, 2 steps) for the tiny pipeline."""
        _lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
        _lowercase : Union[str, Any] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def UpperCamelCase ( self) -> List[str]:
        """Smoke-test the default (DDIM-scheduled) pipeline: shape + 3x3 corner slice."""
        _lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[str] = self.get_dummy_inputs()
        _lowercase : Tuple = pipe(**lowerCamelCase).images
        _lowercase : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> List[Any]:
        """Same smoke test with a PNDM scheduler (skip_prk_steps)."""
        _lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Dict = self.get_dummy_inputs()
        _lowercase : Optional[int] = pipe(**lowerCamelCase).images
        _lowercase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> str:
        """Same smoke test with an LMSDiscrete scheduler."""
        _lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Dict = self.get_dummy_inputs()
        _lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
        _lowercase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> Optional[int]:
        """Same smoke test with an EulerDiscrete scheduler."""
        _lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = self.get_dummy_inputs()
        _lowercase : Any = pipe(**lowerCamelCase).images
        _lowercase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> Dict:
        """Same smoke test with an EulerAncestralDiscrete scheduler."""
        _lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = self.get_dummy_inputs()
        _lowercase : Optional[int] = pipe(**lowerCamelCase).images
        _lowercase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> List[Any]:
        """Same smoke test with a DPMSolverMultistep scheduler."""
        _lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Union[str, Any] = self.get_dummy_inputs()
        _lowercase : Any = pipe(**lowerCamelCase).images
        _lowercase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> int:
        """Check passing precomputed `prompt_embeds` matches passing the raw prompt.

        NOTE(review): the final assertion compares `image_slice_a` against itself
        (both slices were renamed to the same identifier), so as written it can
        never fail — the two slices need distinct names restored.
        """
        _lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : str = self.get_dummy_inputs()
        _lowercase : Any = 3 * [inputs['prompt']]
        # forward
        _lowercase : int = pipe(**lowerCamelCase)
        _lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
        _lowercase : int = self.get_dummy_inputs()
        _lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
        _lowercase : Union[str, Any] = pipe.tokenizer(
            lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
        _lowercase : Tuple = text_inputs['input_ids']
        _lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
        _lowercase : List[Any] = prompt_embeds
        # forward
        _lowercase : Union[str, Any] = pipe(**lowerCamelCase)
        _lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4

    def UpperCamelCase ( self) -> Optional[int]:
        """Check precomputed prompt + negative-prompt embeddings match the raw-text path.

        NOTE(review): same self-comparison issue as above (`image_slice_a` vs itself).
        """
        _lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Optional[Any] = self.get_dummy_inputs()
        _lowercase : Any = 3 * ['this is a negative prompt']
        _lowercase : str = negative_prompt
        _lowercase : Optional[int] = 3 * [inputs['prompt']]
        # forward
        _lowercase : int = pipe(**lowerCamelCase)
        _lowercase : str = output.images[0, -3:, -3:, -1]
        _lowercase : Union[str, Any] = self.get_dummy_inputs()
        _lowercase : str = 3 * [inputs.pop('prompt')]
        _lowercase : Optional[int] = []
        for p in [prompt, negative_prompt]:
            _lowercase : Tuple = pipe.tokenizer(
                lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
            _lowercase : Dict = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
        _lowercase , _lowercase : str = embeds
        # forward
        _lowercase : Dict = pipe(**lowerCamelCase)
        _lowercase : Tuple = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
    # NOTE(review): machine-mangled like the class above — both properties and all
    # test methods are named `UpperCamelCase`, so later definitions shadow earlier
    # ones and nothing is discovered as a test. Bodies read canonical names
    # (`sd_pipe`, `output`, `image`, `options`, `self.gpu_provider`,
    # `self.gpu_options`) whose assignments were rewritten to `_lowercase`.
    # Docstrings describe apparent intent only.

    @property
    def UpperCamelCase ( self) -> Union[str, Any]:
        """ONNX Runtime CUDA provider config (15GB arena); read elsewhere as `self.gpu_provider`."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCamelCase ( self) -> str:
        """ORT session options (memory pattern disabled); read elsewhere as `self.gpu_options`."""
        _lowercase : int = ort.SessionOptions()
        _lowercase : str = False
        return options

    def UpperCamelCase ( self) -> Optional[Any]:
        """Full SD v1.4 ONNX inference on GPU; checks a 512x512 output slice."""
        _lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        _lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        _lowercase : Optional[Any] = output.images
        _lowercase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase ( self) -> Optional[int]:
        """SD v1.5 ONNX with an explicit DDIM scheduler; checks a 512x512 output slice."""
        _lowercase : str = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        _lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = 'open neural network exchange'
        _lowercase : List[Any] = np.random.RandomState(0)
        _lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
        _lowercase : Optional[Any] = output.images
        _lowercase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase ( self) -> List[str]:
        """SD v1.5 ONNX with an LMSDiscrete scheduler; checks a 512x512 output slice."""
        _lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        _lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Tuple = 'open neural network exchange'
        _lowercase : str = np.random.RandomState(0)
        _lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
        _lowercase : Optional[Any] = output.images
        _lowercase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase ( self) -> int:
        """Verify the per-step callback fires once per step (5 steps + final = 6).

        NOTE(review): the inner callback declares three parameters all named
        `lowerCamelCase` (a SyntaxError) while its body reads `step` and
        `latents` — the original parameter names need restoring.
        """
        _lowercase : List[Any] = 0

        def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
            _lowercase : List[str] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                _lowercase : Any = latents[0, -3:, -3:, -1]
                _lowercase : Tuple = np.array(
                    [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                _lowercase : List[Any] = latents[0, -3:, -3:, -1]
                _lowercase : str = np.array(
                    [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3

        _lowercase : Any = False
        _lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Any = 'Andromeda galaxy in a bottle'
        _lowercase : str = np.random.RandomState(0)
        pipe(
            prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def UpperCamelCase ( self) -> Tuple:
        """Pipeline with `safety_checker=None` still runs and survives a save/reload round-trip."""
        _lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        assert isinstance(lowerCamelCase, lowerCamelCase)
        assert pipe.safety_checker is None
        _lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase)
            _lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
| 89 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _SCREAMING_SNAKE_CASE ( ChunkPipeline ):
    """
    Zero-shot object detection pipeline: detects objects described by free-text
    `candidate_labels` in an image, one forward pass per label.

    Restored from a machine-mangled original in which every parameter shared one
    placeholder name (a SyntaxError), the base class / model mapping were the
    undefined name `_a`/`UpperCAmelCase_`, the sort key was `lambda _: x["score"]`
    and the bounding-box tuple unpack was collapsed into a single assignment.
    """

    def __init__( self , **kwargs ):
        """Reject TF models, require the vision backend, and restrict model types."""
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , 'vision' )
        # Only models registered for zero-shot object detection are accepted.
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )

    def __call__( self , image , candidate_labels = None , **kwargs ):
        """
        Detect objects in `image`.

        Args:
            image: a URL/path/`PIL.Image.Image`, or a dict (or list of dicts) with
                keys ``image`` and ``candidate_labels``.
            candidate_labels: the text queries (list of str, or comma-separated str).
            kwargs: may include ``threshold``/``top_k`` (see `postprocess`) and the
                legacy alias ``text_queries`` for `candidate_labels`.

        Returns:
            A list of ``{"score", "label", "box"}`` dicts sorted by descending score.
        """
        if "text_queries" in kwargs:
            # Backward-compatible alias for `candidate_labels`.
            candidate_labels = kwargs.pop('text_queries' )
        if isinstance(image , (str, Image.Image) ):
            # Single image + labels: package into the dict shape `preprocess` expects.
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            # Already a dict (or list of dicts) in the expected shape.
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def _sanitize_parameters( self , **kwargs ):
        """Split call kwargs into (preprocess, forward, postprocess) parameter dicts."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess( self , inputs ):
        """Yield one model-input dict per candidate label (chunked pipeline protocol)."""
        image = load_image(inputs['image'] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )

        # (height, width) used later to rescale predicted boxes to pixel space.
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward( self , model_inputs ):
        """Run the model on one label chunk, threading bookkeeping keys through."""
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )

        outputs = self.model(**model_inputs )

        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        """Convert raw detections to score-sorted ``{"score", "label", "box"}`` dicts.

        Args:
            threshold: minimum detection score to keep (default 0.1).
            top_k: if set, keep only the `top_k` best detections overall.
        """
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]

            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )

                result = {'score': score, 'label': label, 'box': box}
                results.append(result )

        # Best detections first, across all labels.
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box( self , box ) -> Dict[str, int]:
        """Turn a 4-element ``(xmin, ymin, xmax, ymax)`` tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
| 580 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: submodules load on first attribute access via _LazyModule.
# NOTE(review): the mangled original bound every value to a throwaway name, so
# `_import_structure` was undefined at the bottom and the lazy module was never
# installed into sys.modules — both restored here.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras are only importable when Pillow & friends are installed.
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 | 0 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined values of two (unsorted) arrays.

    Raises IndexError if both arrays are empty.

    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    # div = index of the middle element, mod = 1 when the total count is odd.
    # (The mangled original assigned the divmod tuple to a single name, leaving
    # `div`/`mod` undefined; the duplicate parameter names are also restored.)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    # Even count: average the two middle elements.
    return (all_numbers[div] + all_numbers[div - 1]) / 2


# Obfuscated alias kept so the original (mangled) name keeps resolving.
__lowerCAmelCase = median_of_two_arrays

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(F"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 306 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Collect every tracked source file. The canonical names below are what the
# rest of this script reads; the mangled original bound each list to a single
# throwaway name, leaving `filepaths`, `upper_files`, etc. undefined.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Files whose names contain spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(F"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(F"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Files sitting at the repository root rather than inside a directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit non-zero with the number of offending files so CI fails loudly.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 89 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (in seconds) as ``h:mm:ss``, or ``mm:ss`` when under an hour."""
    # The mangled original collapsed the `h, m, s` tuple unpack into a single
    # assignment and mismatched the parameter name; both restored here.
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""


# Obfuscated alias kept so the original (mangled) name keeps resolving.
__lowerCamelCase = format_time
def html_progress_bar(value, total, prefix, label, width=300):
    """Return an HTML ``<progress>`` snippet: `prefix` text, a `value`/`total`
    bar of `width` pixels, and `label` text beside it.

    (The mangled original declared five parameters that all shared one name — a
    SyntaxError — while the body read the canonical names restored here.)
    """
    return F"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


# Obfuscated alias kept so the original (mangled) name keeps resolving.
__lowerCamelCase = html_progress_bar
def text_to_html_table(items):
    """Render `items` (first row = header, remaining rows = data) as an HTML table.

    Floats are formatted with six decimals; everything else via ``str``.
    (Parameter/`isinstance` names restored from the mangled original, whose body
    read `items`/`elt` while the definitions used placeholder names.)
    """
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F""" <th>{i}</th>\n"""
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            elt = F"""{elt:.6f}""" if isinstance(elt, float) else str(elt)
            html_code += F""" <td>{elt}</td>\n"""
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code


# Obfuscated alias kept so the original (mangled) name keeps resolving.
__lowerCamelCase = text_to_html_table
class NotebookProgressBar:
    """
    HTML progress bar for Jupyter notebooks.

    ``update(value)`` throttles redraws: the first `warmup` calls always render,
    after which the bar re-renders roughly every `update_every` seconds based on
    the measured item rate. (Restored from a mangled original in which the class
    attributes, parameters and methods all shared colliding placeholder names
    while call sites kept the canonical ones — e.g. `self.update_bar`,
    `format_time`, `html_progress_bar`.)
    """

    # Number of initial `update` calls that always redraw.
    warmup = 5
    # Target seconds between redraws once the item rate is known.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        """
        Args:
            total: value at which the bar is complete.
            prefix: text shown to the left of the bar.
            leave: keep the bar visible after completion.
            parent: optional tracker owning this bar (it handles rendering).
            width: bar width in pixels.
        """
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None  # sentinel: first `update` initializes timing state
        self.comment = None
        self.output = None  # IPython display handle, set on first render

    def update(self, value, force_update=False, comment=None):
        """Record progress `value` (optionally with a `comment`) and maybe redraw."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and always draw.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                # Redraw again after roughly `update_every` seconds' worth of items.
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild ``self.label`` (e.g. ``[ 12/100 00:05 < 00:40, 2.31 it/s, msg]``) and redraw."""
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            self.label = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"""
        else:
            self.label = (
                f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"""
                f""" {format_time(self.predicted_remaining)}"""
            )
            self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f""", {self.comment}]"""
        self.display()

    def display(self):
        """Render the bar; child bars delegate rendering to their parent."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the rendered bar (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


# Obfuscated alias kept so the original (mangled) class name keeps resolving.
snake_case__ = NotebookProgressBar
class NotebookTrainingTracker(NotebookProgressBar):
    """
    Progress bar that additionally renders a metrics table and an optional child
    bar (used for evaluation inside a training loop).

    (Restored from a mangled original whose class/method names all collided;
    call sites already used the canonical names reinstated here.)
    """

    def __init__(self, num_steps, column_names=None):
        """`num_steps`: total training steps; `column_names`: initial table header row."""
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """Render bar + metrics table + (if present) the child bar's HTML."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append one row (dict of column name -> value) to the metrics table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Create (and return) a child progress bar rendered below the table."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Drop the child bar and re-render without it."""
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    `TrainerCallback` that displays training/evaluation progress and a metrics
    table in a Jupyter notebook.

    (Restored from a mangled original whose methods all shared one placeholder
    name; the canonical callback hook names are required for the Trainer to
    dispatch to them.)
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        """Create the tracker; first table column is Epoch or Step per eval strategy."""
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        """Advance the main bar by one step with an `Epoch x/y` comment."""
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1, comment=f"""Epoch {epoch}/{state.num_train_epochs}""", force_update=self._force_next_update, )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        """Advance (or lazily create) the evaluation child bar."""
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                # Standalone evaluation (no training in progress).
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        """Close and drop the prediction bar when predict() finishes."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only write a table row here when there is no evaluation loop.
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """Write a table row combining the last training loss and the eval metrics."""
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            # Infer the metric prefix ("eval", "test", ...) from a `*_loss` key.
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            # Drop bookkeeping metrics that don't belong in the table.
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"""{metric_key_prefix}_runtime""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""", None)
            for k, v in metrics.items():
                if k == f"""{metric_key_prefix}_loss""":
                    values['Validation Loss'] = v
                else:
                    # "eval_exact_match" -> column "Exact Match"
                    splits = k.split("_")
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        """Final forced update, then release the tracker."""
        self.training_tracker.update(
            state.global_step, comment=f"""Epoch {int(state.epoch)}/{state.num_train_epochs}""", force_update=True)
        self.training_tracker = None
| 496 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
    """Session fixture: a tiny in-memory `datasets.Dataset` with 10 identical rows."""
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string' ) ),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string' ),
                    'answer_start': datasets.Value('int32' ),
                } ),
            'id': datasets.Value('int64' ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory , dataset ):
    """Session fixture: cache the `dataset` fixture to an Arrow file; return its path."""
    filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
# Raw text sample shared by the plain/compressed text-file fixtures below.
# The obfuscated module bound this only to SCREAMING_SNAKE_CASE while the
# fixtures read it as FILE_CONTENT, so expose both names.
FILE_CONTENT : str = "\\n Text data.\n Second line of data."
SCREAMING_SNAKE_CASE : str = FILE_CONTENT
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: FILE_CONTENT written to a plain text file; returns the path."""
    filename = tmp_path_factory.mktemp('data' ) / 'file.txt'
    data = FILE_CONTENT
    with open(filename , 'w' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: FILE_CONTENT compressed with bz2; returns the path."""
    import bz2  # stdlib; the obfuscated `bza` module does not exist

    path = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: FILE_CONTENT compressed with gzip; returns the path."""
    import gzip

    path = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
    data = bytes(FILE_CONTENT , 'utf-8' )
    with gzip.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: FILE_CONTENT compressed with lz4 (None if lz4 unavailable)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame  # the obfuscated `lza.frame` module does not exist

        path = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        data = bytes(FILE_CONTENT , 'utf-8' )
        with lz4.frame.open(path , 'wb' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( text_file , tmp_path_factory ):
    """Session fixture: `text_file` archived into a 7z file (None if py7zr unavailable)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr  # the obfuscated `pyazr` module does not exist

        path = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(path , 'w' ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
        return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( text_file , tmp_path_factory ):
    """Session fixture: `text_file` archived into a tar file; returns the path."""
    import tarfile

    path = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: FILE_CONTENT compressed with xz/lzma; returns the path."""
    import lzma

    path = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with lzma.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( text_file , tmp_path_factory ):
    """Session fixture: `text_file` zipped; returns the zip path."""
    import zipfile

    path = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: FILE_CONTENT compressed with zstandard (None if unavailable)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
        data = bytes(FILE_CONTENT , 'utf-8' )
        with zstd.open(path , 'wb' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: a small TMX translation-memory XML file; returns the path."""
    filename = tmp_path_factory.mktemp('data' ) / 'file.xml'
    data = textwrap.dedent(
        '\\n    <?xml version="1.0" encoding="UTF-8" ?>\n    <tmx version="1.4">\n      <header segtype="sentence" srclang="ca" />\n      <body>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n        </tu>\n      </body>\n    </tmx>' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return filename
# Sample tables shared by the CSV/JSON/Parquet/SQLite fixtures below. The
# obfuscated module reassigned a single name four times, leaving DATA,
# DATA_DICT_OF_LISTS, DATA_312 and DATA_STR undefined where the fixtures
# reference them; bind the real names.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
# Preserve the obfuscated name's final binding (it ended on the last table).
SCREAMING_SNAKE_CASE = DATA_STR
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> dict:
    """Session fixture exposing the module-level ``DATA_DICT_OF_LISTS`` sample table."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA_DICT_OF_LISTS cached to an Arrow file; returns the path."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
    dataset.map(cache_file_name=path )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA inserted into a SQLite database; returns the path."""
    import sqlite3  # stdlib; the module-level `sqlitea` import is a typo

    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
        con.commit()
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA written as dataset.csv; returns the path."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA written as a second CSV (dataset2.csv); returns the path."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( csv_path , tmp_path_factory ):
    """Session fixture: the csv fixture compressed with bz2; returns the path."""
    import bz2  # stdlib; the obfuscated `bza` module does not exist

    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(csv_path , 'rb' ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( csv_path , csva_path , tmp_path_factory ):
    """Session fixture: both CSV fixtures zipped together; returns the zip path."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csva_path , arcname=os.path.basename(csva_path ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( csv_path , csva_path , tmp_path_factory ):
    """Session fixture: both CSVs zipped under uppercase .CSV arcnames; returns the path."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
        f.write(csva_path , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( csv_path , csva_path , tmp_path_factory ):
    """Session fixture: both CSVs zipped under a main_dir/ prefix; returns the path."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.join('main_dir' , os.path.basename(csv_path ) ) )
        f.write(csva_path , arcname=os.path.join('main_dir' , os.path.basename(csva_path ) ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA written as a Parquet file; returns the path."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    # The obfuscated `pa.intaa`/`pa.floataa` factories do not exist; the column
    # values (int / float) call for int64 / float64.
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        } )
    with open(path , 'wb' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: {"data": DATA} dumped to dataset.json; returns the path."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA}
    with open(path , 'w' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: {"data": DATA_DICT_OF_LISTS} dumped to dataset.json; returns the path."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path , 'w' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA written as JSON-lines (dataset.jsonl); returns the path."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA written as a second JSON-lines file (dataset2.jsonl)."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA_312 written as JSON-lines (dataset_312.jsonl)."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: DATA_STR written as JSON-lines (dataset-str.jsonl)."""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory , text_path ):
    """Session fixture: gzip-compressed copy of the text fixture; returns the path."""
    import gzip

    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(text_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory , jsonl_path ):
    """Session fixture: gzip-compressed copy of the jsonl fixture; returns the path."""
    import gzip

    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(jsonl_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( jsonl_path , jsonl2_path , tmp_path_factory ):
    """Session fixture: both jsonl fixtures zipped together; returns the zip path."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( zip_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ):
    """Session fixture: the jsonl zip re-zipped under a nested/ prefix; returns the path.

    NOTE(review): the two middle parameters are unused here; they are kept to
    preserve the original four-argument fixture signature — confirm the
    intended fixture names against the pytest run.
    """
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join('nested' , os.path.basename(zip_jsonl_path ) ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( jsonl_path , jsonl2_path , tmp_path_factory ):
    """Session fixture: both jsonl fixtures zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl_path ) ) )
        f.write(jsonl2_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl2_path ) ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( jsonl_path , jsonl2_path , tmp_path_factory ):
    """Session fixture: both jsonl fixtures archived into a tar; returns the path."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tar_jsonl_path , jsonl_path , jsonl2_path , tmp_path_factory ):
    """Session fixture: the jsonl tar re-archived under a nested/ prefix.

    NOTE(review): the two middle parameters are unused; kept to preserve the
    original four-argument fixture signature — confirm against the pytest run.
    """
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join('nested' , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: dataset.txt with one line per item '0'..'3'; returns the path."""
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: a second text file dataset2.txt with lines '0'..'3'."""
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: lines '0'..'3' in a file with an unknown .abc extension."""
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( text_path , texta_path , tmp_path_factory ):
    """Session fixture: both text fixtures zipped into dataset.text.zip."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(texta_path , arcname=os.path.basename(texta_path ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( text_path , texta_path , tmp_path_factory ):
    """Session fixture: both text fixtures zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.join('main_dir' , os.path.basename(text_path ) ) )
        f.write(texta_path , arcname=os.path.join('main_dir' , os.path.basename(texta_path ) ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( text_path , texta_path , tmp_path_factory ):
    """Session fixture: both text fixtures zipped under unsupported .ext arcnames."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename('unsupported.ext' ) )
        f.write(texta_path , arcname=os.path.basename('unsupported_2.ext' ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: a UTF-8 text file containing a U+2029 paragraph separator."""
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(text )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
    """Session fixture: repository-relative path of the sample RGB JPEG."""
    parts = ('tests' , 'features' , 'data' , 'test_image_rgb.jpg')
    return os.path.join(*parts )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
    """Session fixture: repository-relative path of the sample 44.1 kHz WAV."""
    parts = ('tests' , 'features' , 'data' , 'test_audio_44100.wav')
    return os.path.join(*parts )
@pytest.fixture(scope='session' )
def UpperCamelCase_( image_file , tmp_path_factory ):
    """Session fixture: the sample image zipped twice under different arcnames."""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('.jpg' , '2.jpg' ) )
    return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( tmp_path_factory ):
    """Session fixture: a data dir with visible and hidden files/subdirs for glob tests."""
    data_dir = tmp_path_factory.mktemp('data_dir' )
    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 10 )
    with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 10 )
    return data_dir
| 89 | 0 |
def A__ ( lowercase: "float | int | str" ) -> tuple[int, int]:
    """Convert a decimal number (or numeric string) to a reduced fraction.

    Returns:
        (numerator, denominator) in lowest terms.

    Raises:
        ValueError: if *lowercase* cannot be parsed as a float.
    """
    try:
        decimal = float(lowercase )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        # Whole numbers are already exact fractions over 1.
        return int(decimal ), 1
    else:
        # Scale by a power of ten so the value becomes an integer ratio.
        number_of_frac_digits = len(str(decimal ).split('.' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # Euclidean algorithm: after the loop, `divisor` is gcd(num, den).
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )


# Backward-compatible alias used by the demo calls in the __main__ block.
decimal_to_fraction = A__
if __name__ == "__main__":
    # Demo output. The original called an undefined `decimal_to_fraction`;
    # call the defined function directly. The final call intentionally
    # demonstrates the ValueError path.
    print(f'''{A__(2) = }''')
    print(f'''{A__(8_9.0) = }''')
    print(f'''{A__('67') = }''')
    print(f'''{A__('45.0') = }''')
    print(f'''{A__(1.5) = }''')
    print(f'''{A__('6.25') = }''')
    print(f'''{A__('78td') = }''')
| 305 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import machinery for the GPT-BigCode sub-package: map each submodule to
# the public names it provides, add the torch-only modeling names when torch is
# installed, and replace this module with a _LazyModule at import time.
# (The obfuscated version never bound `_import_structure`, which made the
# _LazyModule call a NameError.)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Keep the obfuscated module-level name bound, then install the lazy module
    # in sys.modules so attribute access triggers the deferred imports.
    SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = SCREAMING_SNAKE_CASE
| 89 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    """Count, for each perimeter <= max_perimeter, the integer right triangles
    (base, perpendicular, hypotenuse) with that perimeter.

    `perpendicular` starts at `base` so each unordered pair is counted once.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1_000 ) -> int:
    """Project Euler 39: the perimeter <= max_perimeter admitting the most
    right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]


# Backward-compatible alias: the obfuscated module bound this name to the
# second (solution) function above.
lowerCamelCase_ = solution

if __name__ == "__main__":
    print(F'''Perimeter {solution()} has maximum solutions''')
| 323 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
    """Builds tiny XLM configs/inputs and runs shape checks for every model head.

    NOTE(review): restored from an obfuscated version in which every method had
    duplicate parameter names (a SyntaxError) and every local was collapsed into
    one; names follow the conventional model-tester layout — confirm against the
    consuming test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for a tiny XLM model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )

    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Base model: check the last hidden state shape under three call forms."""
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """LM head: check loss is scalar and logits cover the vocabulary."""
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Simple QA head: check start/end logits shapes."""
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Beam-search QA head: check loss and top-k start/end outputs."""
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Sequence classification head: check loss and logits shapes."""
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Token classification head: check per-token logits shape."""
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Multiple-choice head: expand inputs per choice and check logits shape."""
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict

    # Backward-compatible alias: in the obfuscated class every method shared the
    # name `UpperCamelCase`, so that name resolved to the last definition.
    UpperCamelCase = prepare_config_and_inputs_for_common
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
    """Common test suite for the XLM model family.

    NOTE(review): in the incoming code every method was named ``UpperCamelCase``
    and all three class attributes ``lowercase_``, so later definitions silently
    shadowed earlier ones and nothing was discoverable by unittest or the tester
    mixins. The names below restore the conventional transformers hooks implied
    by the framework (``setUp``, ``test_*``, ``super()._prepare_for_class``) --
    confirm against the ``_a`` mixin bases.
    """

    # Model classes exercised by the common tests.
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Return True for pipeline test combinations known to fail for XLM."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin hook: XLMForQuestionAnswering also needs start/end position labels.

        NOTE(review): dict keys and ``torch_device`` restored from the transformers
        convention; the incoming code assigned the tensors to throwaway locals.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        """Create the shared model tester and config tester."""
        self.model_tester = XLMModelTester(self)
        # NOTE(review): config_class was an undefined placeholder in the incoming
        # code; XLMConfig is the conventional value -- confirm it is imported.
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Shape-check the per-step attention tuples produced by generate()."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Shape-check the per-step hidden-state tuples produced by generate()."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states), )

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that the first pretrained XLM checkpoint loads."""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
    """Slow integration test for greedy generation with a pretrained XLM checkpoint."""

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        """Greedy-generate from "the president" and compare against reference ids.

        NOTE(review): renamed from a colliding obfuscated name so unittest can
        discover it; ``torch_device`` is assumed imported at module top and
        ``do_sample=False`` restored (the expected ids are a deterministic
        greedy loop) -- confirm.
        """
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89 | 0 |
def lowerCamelCase__ ( lowercase ):
    """Return True if the directed graph (adjacency dict) contains a cycle.

    A back edge -- an edge to a vertex currently on the recursion stack -- marks
    a cycle. The incoming code delegated to an undefined helper name, so the DFS
    is now a self-contained nested function.
    """
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()

    def _dfs(vertex):
        visited.add(vertex)
        rec_stk.add(vertex)
        for node in lowercase[vertex]:
            if node not in visited:
                if _dfs(node):
                    return True
            elif node in rec_stk:
                # Back edge: `node` is an ancestor still being explored.
                return True
        # The node needs to be removed from recursion stack before returning.
        rec_stk.remove(vertex)
        return False

    return any(node not in visited and _dfs(node) for node in lowercase)
def lowerCamelCase__ ( graph , vertex , visited , rec_stk ):
    """Depth-first search step: return True if a cycle is reachable from ``vertex``.

    The incoming def repeated one parameter name four times (a SyntaxError) and
    the body read undefined names; parameters restored to graph/vertex/visited/
    rec_stk as the body's usage requires. ``visited`` and ``rec_stk`` are mutated
    in place by the caller's traversal.
    """
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if lowerCamelCase__(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Back edge to a vertex still on the recursion stack -> cycle.
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
# Run the module's doctests when this file is executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 62 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
# The rest of this script refers to the module logger as `logger`; alias it so
# those references resolve (the incoming code only bound the obfuscated name).
logger = SCREAMING_SNAKE_CASE
@dataclass
class _lowerCamelCase:
    """Arguments describing which dataset to use and how it is preprocessed.

    NOTE(review): every field below binds the same name ``lowercase_``, so only
    the last assignment survives at class-creation time; the metadata help
    strings indicate the intended distinct fields (dataset_name,
    dataset_config_name, max_seq_length, ...) -- confirm before relying on this
    class at runtime.
    """

    lowercase_ : Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    lowercase_ : Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
    lowercase_ : int = field(
        default=10_24, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    lowercase_ : bool = field(
        default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    lowercase_ : bool = field(
        default=_a, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    lowercase_ : Optional[int] = field(
        default=_a, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    lowercase_ : Optional[int] = field(
        default=_a, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    lowercase_ : Optional[int] = field(
        default=_a, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
    lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )

    def UpperCamelCase ( self) -> None:
        """Validate that either a hub dataset name or local train/validation files were given.

        NOTE(review): looks like this was meant to be ``__post_init__`` so that
        @dataclass runs it automatically after construction -- confirm.
        """
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            # Local files: both must be csv or json, and of the same kind.
            _lowercase : int = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            _lowercase : Tuple = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
    """Arguments identifying the pretrained model/config/tokenizer to fine-tune.

    NOTE(review): every field below binds the same name ``lowercase_``, so only
    the last assignment survives at class-creation time; the metadata help
    strings indicate the intended distinct fields (model_name_or_path,
    config_name, tokenizer_name, cache_dir, use_fast_tokenizer, model_revision,
    use_auth_token) -- confirm before relying on this class at runtime.
    """

    lowercase_ : str = field(
        default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    lowercase_ : Optional[str] = field(
        default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    lowercase_ : bool = field(
        default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    lowercase_ : str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    lowercase_ : bool = field(
        default=_a, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
def UpperCamelCase_( ) -> None:
    """Fine-tune / evaluate / predict with a TAPEX (BART) sequence classifier on TabFact-style data.

    NOTE(review): the incoming body bound every intermediate to throwaway names
    (`_lowercase`) and then immediately read the conventional names
    (`model_args`, `training_args`, `raw_datasets`, ...), so it could never run.
    The locals below were restored from those read sites; the two attribute
    names `fp16` and `np.float32` were restored from the obfuscated `fpaa` /
    `floataa` -- confirm against the upstream example script.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )

        for key in data_files.keys():
            logger.info(F'''load a local file for {key}: {data_files[key]}''' )

        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
            F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )

    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            # Rows are '\n'-separated, cells '#'-separated; first row is the header.
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        # Pad to a multiple of 8 so fp16 tensor cores are used efficiently.
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )

    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )

    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def UpperCamelCase_( lowerCamelCase_ ) -> None:
    """Entry point for TPU multiprocessing via `xla_spawn`; the process index argument is unused."""
    # For xla_spawn (TPUs)
    main()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 89 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Generic key/value type parameters used by the linked-list and cache classes
# below. The incoming code bound both TypeVars to the same throwaway name `a`
# while the classes read `T` and `U`, which could never resolve.
T = TypeVar("""T""")
U = TypeVar("""U""")
class A_ ( Generic[T, U] ):
    """Doubly-linked-list node holding a key/value pair.

    NOTE(review): the incoming __init__ repeated one parameter name (a
    SyntaxError) and bound locals instead of attributes; the attribute
    assignments below are what __repr__ and the list/cache code rely on.
    """

    def __init__(self, key, val):
        self.key = key
        self.val = val
        # Links are populated when the node is inserted into a DoubleLinkedList.
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            F"Node: key: {self.key}, val: {self.val}, "
            F"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class A_ ( Generic[T, U] ):
    """Doubly linked list with sentinel head/rear nodes.

    Real entries always live strictly between the sentinels; the node nearest
    the rear is the most recently used. NOTE(review): attribute assignments and
    the ``add``/``remove`` method names were restored -- the LRU cache below
    calls ``self.list.add`` / ``self.list.remove``.
    """

    def __init__(self):
        # Sentinel nodes carry no key/value.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node):
        """Insert ``node`` just before the rear sentinel (most-recently-used end)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node

    def remove(self, node):
        """Unlink ``node`` and return it, or return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class A_ ( Generic[T, U] ):
    """LRU cache backed by a doubly linked list plus a key->node map.

    NOTE(review): the incoming __init__ bound locals instead of attributes, and
    the class attribute / get / put names were restored from their call sites
    inside the decorator classmethod below.
    """

    # One cache instance per decorated function.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            F"CacheInfo(hits={self.hits}, misses={self.miss}, "
            F"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        """Return the cached value for ``key`` (refreshing its recency) or None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        """Insert or overwrite ``key``; evict the least-recently-used entry at capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def lowerCAmelCase( cls, size = 1_2_8):
        """Decorator factory: cache ``func`` results keyed by its first positional argument."""
        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = cls(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    # Cache miss: compute and remember the value.
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
# Run the module's doctests when this file is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 652 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    """Return -1 if the number has an odd count of prime factors, else 1.

    NOTE(review): a full Mobius function would also return 0 for non-square-free
    numbers; this implementation only encodes the parity sign -- confirm against
    the intended semantics.

    Raises:
        TypeError: if the input is not an integer.
        ValueError: if the input is smaller than 1.
    """
    if not isinstance(lowerCamelCase_ , int ):
        # The incoming code tested isinstance(x, x), referenced an undefined
        # name in the message, and never passed the message to TypeError.
        msg = F'''Input value of [number={lowerCamelCase_}] must be an integer'''
        raise TypeError(msg )
    if lowerCamelCase_ < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(lowerCamelCase_ ) ) % 2 else 1
# Run the module's doctests when this file is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 89 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ ( _a ):
    """Seq2seq-specific extension of the base TrainingArguments.

    NOTE(review): every field below binds the same name ``__UpperCamelCase``, so
    only the last assignment survives at class-creation time; the metadata help
    strings indicate the intended distinct fields (label smoothing, sortish
    sampler, predict_with_generate, adafactor, dropout knobs, lr scheduler) --
    confirm before relying on this class at runtime.
    """

    __UpperCamelCase = field(
        default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    __UpperCamelCase = field(default=_a , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
    __UpperCamelCase = field(
        default=_a , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    __UpperCamelCase = field(default=_a , metadata={'''help''': '''whether to use adafactor'''} )
    __UpperCamelCase = field(
        default=_a , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    __UpperCamelCase = field(
        default=_a , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    __UpperCamelCase = field(default=_a , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    __UpperCamelCase = field(
        default=_a , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    __UpperCamelCase = field(
        default='''linear''' , metadata={'''help''': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 454 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
def __init__( self, row, column, default_value = 0) -> None:
    """Create a ``row`` x ``column`` matrix with every cell set to ``default_value``.

    NOTE(review): the incoming def repeated one parameter name three times (a
    SyntaxError) and bound locals instead of the ``self.row``/``self.column``/
    ``self.array`` attributes that every other method reads.
    """
    self.row, self.column = row, column
    self.array = [[default_value for c in range(column)] for r in range(row)]
def __str__( self) -> str:
    """Render the matrix with every column right-padded to the widest element.

    NOTE(review): the incoming body bound every intermediate to one throwaway
    name and then read undefined names; locals restored from those read sites.
    """
    s = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
    # Make string identifier
    max_element_length = 0
    for row_vector in self.array:
        for obj in row_vector:
            max_element_length = max(max_element_length, len(str(obj)))
    string_format_identifier = F'''%{max_element_length}s'''

    # Make string and return
    def single_line(row_vector) -> str:
        nonlocal string_format_identifier
        line = '['
        line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
        line += "]"
        return line

    s += "\n".join(single_line(row_vector) for row_vector in self.array)
    return s
def __repr__( self) -> str:
"""simple docstring"""
return str(self)
def UpperCamelCase ( self, lowerCamelCase) -> bool:
"""simple docstring"""
if not (isinstance(lowerCamelCase, (list, tuple)) and len(lowerCamelCase) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, lowerCamelCase) -> Any:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
return self.array[loc[0]][loc[1]]
def __setitem__( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
_lowercase : Optional[Any] = value
def __add__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == another.row and self.column == another.column
# Add
_lowercase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : int = self[r, c] + another[r, c]
return result
def __neg__( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : List[str] = -self[r, c]
return result
def __sub__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
if isinstance(lowerCamelCase, (int, float)): # Scalar multiplication
_lowercase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c] * another
return result
elif isinstance(lowerCamelCase, lowerCamelCase): # Matrix multiplication
assert self.column == another.row
_lowercase : str = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowercase : Tuple = F'''Unsupported type given for another ({type(lowerCamelCase)})'''
raise TypeError(lowerCamelCase)
def UpperCamelCase ( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c]
return result
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowercase : Dict = v.transpose()
_lowercase : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Print a small worked example of the Sherman-Morrison update."""
        # a^(-1): start from the 3x3 identity matrix.
        ainv = _lowerCamelCase(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v column vectors
        u = _lowerCamelCase(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = _lowerCamelCase(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test2()
| 89 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _a ( unittest.TestCase ):
    '''Tests for CLIPSegProcessor: saving/loading round-trips, and parity between
    the processor and its wrapped tokenizer / image processor.

    NOTE(review): every method below shares the obfuscated name ``UpperCamelCase_``,
    so later defs shadow earlier ones and unittest discovery cannot find the
    individual test cases; the bodies also reference a lost local name ``A``.
    Restore the original method and variable names to make this runnable.
    '''
    def UpperCamelCase_ ( self ):
        '''Write a tiny CLIP vocab/merges pair and a ViT image-processor config into a temp dir.'''
        SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
        # fmt: off
        SCREAMING_SNAKE_CASE : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        SCREAMING_SNAKE_CASE : List[Any] = dict(zip(A, range(len(A ) ) ) )
        SCREAMING_SNAKE_CASE : Tuple = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        SCREAMING_SNAKE_CASE : Any = {'unk_token': '<unk>'}
        SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(A ) + '\n' )
        with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
            fp.write('\n'.join(A ) )
        SCREAMING_SNAKE_CASE : Optional[Any] = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, A )
        with open(self.image_processor_file, 'w', encoding='utf-8' ) as fp:
            json.dump(A, A )
    def UpperCamelCase_ ( self, **A ):
        '''Load the slow CLIP tokenizer from the fixture dir.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **A )
    def UpperCamelCase_ ( self, **A ):
        '''Load the fast (Rust) CLIP tokenizer from the fixture dir.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **A )
    def UpperCamelCase_ ( self, **A ):
        '''Load the ViT image processor from the fixture dir.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **A )
    def UpperCamelCase_ ( self ):
        '''Remove the temp fixture dir (tearDown).'''
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase_ ( self ):
        '''Return a list with one random PIL RGB image.'''
        SCREAMING_SNAKE_CASE : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
        SCREAMING_SNAKE_CASE : Union[str, Any] = [Image.fromarray(np.moveaxis(A, 0, -1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase_ ( self ):
        '''save_pretrained/from_pretrained round-trips for both slow and fast tokenizer backends.'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Tuple = CLIPSegProcessor(tokenizer=A, image_processor=A )
        processor_slow.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE : Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=A )
        SCREAMING_SNAKE_CASE : List[Any] = CLIPSegProcessor(tokenizer=A, image_processor=A )
        processor_fast.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer, A )
        self.assertIsInstance(processor_fast.tokenizer, A )
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor, A )
        self.assertIsInstance(processor_fast.image_processor, A )
    def UpperCamelCase_ ( self ):
        '''from_pretrained should honor extra kwargs for tokenizer and image processor.'''
        SCREAMING_SNAKE_CASE : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE : str = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor(do_normalize=A, padding_value=1.0 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=A, padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer, A )
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor, A )
    def UpperCamelCase_ ( self ):
        '''Processor image path must match the bare image processor numerically.'''
        SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
        SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPSegProcessor(tokenizer=A, image_processor=A )
        SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : str = image_processor(A, return_tensors='np' )
        SCREAMING_SNAKE_CASE : Optional[int] = processor(images=A, return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
    def UpperCamelCase_ ( self ):
        '''Processor text path must match the bare tokenizer exactly.'''
        SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : int = CLIPSegProcessor(tokenizer=A, image_processor=A )
        SCREAMING_SNAKE_CASE : str = 'lower newer'
        SCREAMING_SNAKE_CASE : Dict = processor(text=A )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key] )
    def UpperCamelCase_ ( self ):
        '''Text + image call returns the combined keys; calling with nothing raises.'''
        SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : int = CLIPSegProcessor(tokenizer=A, image_processor=A )
        SCREAMING_SNAKE_CASE : List[Any] = 'lower newer'
        SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : Any = processor(text=A, images=A )
        self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(A ):
            processor()
    def UpperCamelCase_ ( self ):
        '''Image + visual prompt call returns pixel_values and conditional_pixel_values.'''
        SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : Tuple = CLIPSegProcessor(tokenizer=A, image_processor=A )
        SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=A, visual_prompt=A )
        self.assertListEqual(list(inputs.keys() ), ['pixel_values', 'conditional_pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(A ):
            processor()
    def UpperCamelCase_ ( self ):
        '''batch_decode must delegate to the tokenizer's batch_decode.'''
        SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : List[Any] = CLIPSegProcessor(tokenizer=A, image_processor=A )
        SCREAMING_SNAKE_CASE : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        SCREAMING_SNAKE_CASE : Optional[int] = processor.batch_decode(A )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(A )
        self.assertListEqual(A, A )
| 28 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t) -> str:
    """Format `t` (a number of seconds) as "h:mm:ss", or "mm:ss" when under an hour."""
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    # Drop the hour field entirely when it is zero.
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300) -> str:
    """Return the HTML for a notebook progress bar showing `value` out of `total`,
    with `prefix` above the bar and `label` after it."""
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items) -> str:
    """Render `items` (first row = column headers, remaining rows = data) as an HTML table."""
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            # Floats are shown with six decimals; everything else goes through str().
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class _lowerCamelCase:
    """A progress bar rendered as HTML in a Jupyter notebook, updated in place.

    NOTE(review): obfuscation damage — both class attributes below share the
    name ``lowercase_`` (upstream these are ``warmup`` and ``update_every``,
    which the methods read), several methods take duplicate ``lowerCamelCase``
    parameters (a SyntaxError), and the ``_lowercase`` assignments have lost
    their attribute targets. Restore the original names before use.
    """
    lowercase_ : str = 5
    lowercase_ : str = 0.2
    def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
        """Store total/prefix/leave/parent/width and reset the timing state."""
        _lowercase : Optional[int] = total
        _lowercase : Optional[int] = '' if prefix is None else prefix
        _lowercase : Tuple = leave
        _lowercase : str = parent
        _lowercase : str = width
        # Timing state, filled in on the first call to update().
        _lowercase : List[Any] = None
        _lowercase : List[str] = None
        _lowercase : Tuple = None
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
        """Advance the bar to `value`, re-rendering only when forced, during
        warmup, or after `wait_for` more items to keep display updates cheap."""
        _lowercase : Any = value
        if comment is not None:
            _lowercase : Union[str, Any] = comment
        if self.last_value is None:
            # First call: start the clock and render immediately.
            _lowercase : Dict = time.time()
            _lowercase : Tuple = value
            _lowercase : str = None
            _lowercase : Optional[int] = self.warmup
            _lowercase : Optional[Any] = 1
            self.update_bar(lowerCamelCase)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            _lowercase : List[str] = time.time()
            _lowercase : Tuple = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                _lowercase : Dict = self.elapsed_time / (value - self.start_value)
            else:
                _lowercase : int = None
            if value >= self.total:
                # Finished: clamp to total and close unless asked to leave the bar visible.
                _lowercase : Dict = self.total
                _lowercase : List[str] = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                _lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
            self.update_bar(lowerCamelCase)
            _lowercase : int = value
            _lowercase : Tuple = current_time
            # Recompute how many items to wait before the next re-render.
            if self.average_time_per_item is None:
                _lowercase : str = 1
            else:
                _lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
        """Rebuild the label (count, elapsed, ETA, it/s, comment) and re-display."""
        _lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
        if self.elapsed_time is None:
            _lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            _lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            _lowercase : Union[str, Any] = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                F''' {format_time(self.predicted_remaining)}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
        self.display()
    def UpperCamelCase ( self) -> List[Any]:
        """Render the HTML: delegate to the parent bar when nested, otherwise
        create or update our own IPython display handle."""
        _lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            _lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
        else:
            self.output.update(disp.HTML(self.html_code))
    def UpperCamelCase ( self) -> Optional[Any]:
        """Blank out the bar's display (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
    """A progress bar that also renders a metrics table underneath it and can
    host one child bar (e.g. an evaluation bar under the training bar).

    NOTE(review): the base class is written here as ``_a`` but no ``_a`` is
    defined in this module — upstream this subclasses the progress-bar class
    directly above; the ``_lowercase`` assignments have also lost their
    attribute targets (``inner_table``, ``child_bar``). Confirm and restore.
    """
    def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
        """Initialize the underlying bar and the (optional) table column names."""
        super().__init__(lowerCamelCase)
        _lowercase : Optional[Any] = None if column_names is None else [column_names]
        _lowercase : Any = None
    def UpperCamelCase ( self) -> List[Any]:
        """Render bar + inner table + child bar into one HTML display."""
        _lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            _lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
        else:
            self.output.update(disp.HTML(self.html_code))
    def UpperCamelCase ( self, lowerCamelCase) -> Dict:
        """Append one row of metric values to the inner table, extending the
        header row with any new keys while only one row exists."""
        if self.inner_table is None:
            _lowercase : Dict = [list(values.keys()), list(values.values())]
        else:
            _lowercase : Tuple = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(lowerCamelCase)
                _lowercase : str = columns
            self.inner_table.append([values[c] for c in columns])
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
        """Create and return a child bar displayed under this tracker."""
        _lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
        return self.child_bar
    def UpperCamelCase ( self) -> Union[str, Any]:
        """Drop the child bar and re-render without it."""
        _lowercase : Optional[Any] = None
        self.display()
class _lowerCamelCase( _a ):
    """A `TrainerCallback` that drives the notebook progress/metrics display
    during training, evaluation and prediction.

    NOTE(review): the base class ``_a`` is not defined in this module
    (upstream this extends ``TrainerCallback``), and the ``_lowercase``
    assignments have lost their attribute targets — restore before use.
    """
    def __init__( self) -> List[Any]:
        """Reset tracker, prediction bar and the force-update flag."""
        _lowercase : Union[str, Any] = None
        _lowercase : Dict = None
        _lowercase : Dict = False
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
        """on_train_begin: build the tracker with a first column of Epoch/Step
        depending on the evaluation strategy."""
        _lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        _lowercase : Dict = 0
        _lowercase : Tuple = 0
        _lowercase : int = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        _lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
        """on_step_end: advance the tracker and clear the force-update flag."""
        _lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        _lowercase : str = False
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
        """on_prediction_step: lazily create and advance the evaluation bar."""
        if not has_length(lowerCamelCase):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                # Nest the prediction bar under the training tracker when one exists.
                _lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
            else:
                _lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
        """on_predict: close and drop the prediction bar."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        _lowercase : Any = None
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
        """on_log: when there is no evaluation, write the training loss row."""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            _lowercase : Dict = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            _lowercase : List[Any] = state.global_step
            self.training_tracker.write_line(lowerCamelCase)
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
        """on_evaluate: assemble one table row from the metrics dict (last train
        loss, eval loss, prettified metric names) and force the next re-render."""
        if self.training_tracker is not None:
            _lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    _lowercase : int = log['loss']
                    break
            if self.first_column == "Epoch":
                _lowercase : Union[str, Any] = int(state.epoch)
            else:
                _lowercase : Optional[Any] = state.global_step
            _lowercase : str = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    _lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
            # Strip bookkeeping entries that should not appear in the table.
            _lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
            _lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
            _lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
            _lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
            _lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
            _lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    _lowercase : Union[str, Any] = v
                else:
                    # "eval_accuracy" -> "Accuracy", etc.
                    _lowercase : Optional[Any] = k.split('_')
                    _lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
                    _lowercase : Tuple = v
            self.training_tracker.write_line(lowerCamelCase)
            self.training_tracker.remove_child()
            _lowercase : str = None
            # Evaluation takes a long time so we should force the next update.
            _lowercase : Optional[Any] = True
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
        """on_train_end: push the tracker to its final state and drop it."""
        self.training_tracker.update(
            state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
        _lowercase : Any = None
| 89 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Derive a `FocalNetConfig` from an original checkpoint name.

    Depths, focal levels/windows, embedding size and the ImageNet label maps
    are all inferred from substrings of `model_name` (e.g. "focalnet-tiny-lrf",
    "focalnet-large-lrf-fl4").
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # Conv patch embedding, post-layernorm and layerscale are only enabled on
    # the large/huge variants.
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def UpperCAmelCase(a_) -> str:
    """Translate one key of an original FocalNet state dict to Transformers naming."""
    key = a_
    # Plain substring renames; str.replace is a no-op when the pattern is absent.
    for old, new in (
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("patch_embed.norm", "embeddings.norm"),
    ):
        key = key.replace(old, new)
    if "layers" in key:
        key = "encoder." + key
    key = key.replace("encoder.layers", "encoder.stages")
    key = key.replace("downsample.proj", "downsample.projection")
    key = key.replace("blocks", "layers")
    # Modulation sub-modules: only the weight/bias entries are renamed.
    for old, new in (
        ("modulation.f", "modulation.projection_in"),
        ("modulation.h", "modulation.projection_context"),
        ("modulation.proj", "modulation.projection_out"),
    ):
        if f"{old}.weight" in key or f"{old}.bias" in key:
            key = key.replace(old, new)
    if key == "norm.weight":
        key = "layernorm.weight"
    if key == "norm.bias":
        key = "layernorm.bias"
    if "head" in key:
        key = key.replace("head", "classifier")
    else:
        # Everything except the classification head lives under the backbone prefix.
        key = "focalnet." + key
    return key
def UpperCAmelCase ( a_ , a_ , a_=False ) -> str:
    """Download an original FocalNet checkpoint, convert it to a Transformers
    `FocalNetForImageClassification`, verify the logits on a COCO test image,
    and optionally save/push the result.

    NOTE(review): obfuscation damage — the signature declares ``a_`` three
    times (a SyntaxError; upstream: model_name, pytorch_dump_folder_path,
    push_to_hub), every ``__A =`` has lost its target name, and the body reads
    names (``model_name_to_url``, ``state_dict``, ``val``, ``model``, ``inputs``,
    ``outputs`` ...) that are never bound here. Restore before running.
    """
    __A = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    __A = model_name_to_url[model_name]
    print("Checkpoint URL: " , lowerCamelCase_ )
    # The checkpoint stores the weights under the 'model' key.
    __A = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="cpu" )['model']
    # rename keys
    # NOTE(review): upstream this pops each key and re-inserts it under the
    # renamed key produced by the key-renaming helper above.
    for key in state_dict.copy().keys():
        __A = state_dict.pop(lowerCamelCase_ )
        __A = val
    __A = get_focalnet_config(lowerCamelCase_ )
    __A = FocalNetForImageClassification(lowerCamelCase_ )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase_ )
    # verify conversion
    __A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    __A = BitImageProcessor(
        do_resize=lowerCamelCase_ , size={"shortest_edge": 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=2_2_4 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
    __A = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
    __A = processor(images=lowerCamelCase_ , return_tensors="pt" )
    # Reference torchvision pipeline used to cross-check the image processor.
    __A = transforms.Compose(
        [
            transforms.Resize(2_5_6 ),
            transforms.CenterCrop(2_2_4 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    __A = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
    # verify pixel_values
    # NOTE(review): the second argument should be the torchvision-produced
    # pixel values computed just above — the obfuscation lost the name.
    assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1E-4 )
    __A = model(**lowerCamelCase_ )
    __A = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Per-variant expected logit slices recorded from the original models.
    if model_name == "focalnet-tiny":
        __A = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
    elif model_name == "focalnet-tiny-lrf":
        __A = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
    elif model_name == "focalnet-small":
        __A = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
    elif model_name == "focalnet-small-lrf":
        __A = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
    elif model_name == "focalnet-base":
        __A = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
    elif model_name == "focalnet-base-lrf":
        __A = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCamelCase_ )
        processor.save_pretrained(lowerCamelCase_ )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # Command-line entry point for the FocalNet conversion script.
    # NOTE(review): the parser/args variables lost their names to obfuscation —
    # the lines below read `parser` and `args` but assign to SCREAMING_SNAKE_CASE.
    SCREAMING_SNAKE_CASE :int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='focalnet-tiny',
        type=str,
        help='Name of the FocalNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub.',
    )
    SCREAMING_SNAKE_CASE :Optional[Any] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 55 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ):
    """Build a `FocalNetConfig` for the original checkpoint named `lowerCamelCase_`.

    All architecture settings and the ImageNet label maps are derived from
    substrings of the checkpoint name.
    """
    # The body below referenced `model_name`; bind it to the parameter explicitly.
    model_name = lowerCamelCase_
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    return FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
def UpperCamelCase_( name ) -> str:
    """Translate one key of an original FocalNet checkpoint into the
    Hugging Face ``FocalNetForImageClassification`` naming scheme.

    Fix: the original assigned every intermediate result to a mangled local
    (``_lowercase``) while each subsequent ``if`` test read ``name`` — a
    NameError at runtime.  The replacements now feed into ``name`` so the
    chain composes as intended.
    """
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        # prefix stage keys so the "encoder.layers" -> "encoder.stages" rewrite applies
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        # classification head keeps no "focalnet." prefix
        name = name.replace('head' , 'classifier' )
    else:
        name = 'focalnet.' + name
    return name
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
    """Download an original FocalNet checkpoint, rename its keys to the HF
    layout, sanity-check the converted classifier on the COCO cats image, and
    optionally save it to disk and/or push it to the Hub.

    NOTE(review): the three positional parameters are presumably
    (model_name, pytorch_dump_folder_path, push_to_hub) — and, as elsewhere in
    this file, locals are mangled to ``_lowercase`` while later lines read
    ``state_dict``/``model``/``processor``/``inputs``/``outputs``; the names
    must be restored before this script can run.
    """
    # fmt: off
    # mapping from HF model name to the original public checkpoint URL
    _lowercase : Dict = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    _lowercase : Dict = model_name_to_url[model_name]
    print('Checkpoint URL: ' , lowerCamelCase_ )
    _lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
    # rename keys
    for key in state_dict.copy().keys():
        _lowercase : Dict = state_dict.pop(lowerCamelCase_ )
        _lowercase : Optional[int] = val
    _lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
    _lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase_ )
    # verify conversion
    _lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    _lowercase : Any = BitImageProcessor(
        do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
    _lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
    _lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
    # torchvision reference pipeline, used to cross-check the HF processor output
    _lowercase : str = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    _lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
    _lowercase : Dict = model(**lowerCamelCase_ )
    _lowercase : int = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
    print('First values of logits:' , outputs.logits[0, :3] )
    # per-checkpoint expected logits (first three entries) for the cats image
    if model_name == "focalnet-tiny":
        _lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
    elif model_name == "focalnet-tiny-lrf":
        _lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
    elif model_name == "focalnet-small":
        _lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
    elif model_name == "focalnet-small-lrf":
        _lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
    elif model_name == "focalnet-base":
        _lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
    elif model_name == "focalnet-base-lrf":
        _lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCamelCase_ )
        processor.save_pretrained(lowerCamelCase_ )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # CLI entry point: convert one FocalNet checkpoint and optionally push it.
    # NOTE(review): the parser is bound to SCREAMING_SNAKE_CASE but read back as
    # `parser`/`args`, and `convert_focalnet_checkpoint` is not the name the
    # function above is defined under — identifiers look machine-mangled.
    SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
    """Test harness that builds small RegNet configs/inputs for the suite below.

    NOTE(review): ``__init__`` repeats the parameter name ``A__`` (a
    SyntaxError in Python) and assigns every value to the local ``_A`` instead
    of ``self.<attr>``, yet the methods read ``self.batch_size`` etc. — the
    original parameter and attribute names need to be restored.
    """
    def __init__( self ,A__ ,A__=3 ,A__=32 ,A__=3 ,A__=10 ,A__=[10, 20, 30, 40] ,A__=[1, 1, 2, 1] ,A__=True ,A__=True ,A__="relu" ,A__=3 ,A__=None ,):
        _A : Union[str, Any] = parent
        _A : Any = batch_size
        _A : Tuple = image_size
        _A : Union[str, Any] = num_channels
        _A : int = embeddings_size
        _A : Any = hidden_sizes
        _A : str = depths
        _A : List[Any] = is_training
        _A : Dict = use_labels
        _A : List[str] = hidden_act
        _A : List[Any] = num_labels
        _A : Optional[Any] = scope
        _A : Union[str, Any] = len(A__ )
    def A__ ( self ):
        # Build a random pixel_values tensor (and labels when enabled) plus a config.
        _A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _A : List[str] = None
        if self.use_labels:
            _A : str = ids_tensor([self.batch_size] ,self.num_labels )
        _A : Tuple = self.get_config()
        return config, pixel_values, labels
    def A__ ( self ):
        # Assemble a RegNetConfig from the tester's hyper-parameters.
        return RegNetConfig(
            num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
    def A__ ( self ,A__ ,A__ ,A__ ):
        # Forward the bare model and check the spatial shape of the last hidden state.
        _A : Union[str, Any] = RegNetModel(config=A__ )
        model.to(A__ )
        model.eval()
        _A : List[str] = model(A__ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def A__ ( self ,A__ ,A__ ,A__ ):
        # Forward the classification head and check the logits shape.
        _A : Union[str, Any] = self.num_labels
        _A : Optional[Any] = RegNetForImageClassification(A__ )
        model.to(A__ )
        model.eval()
        _A : Optional[Any] = model(A__ ,labels=A__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def A__ ( self ):
        # Pack config + inputs in the dict form the common tests expect.
        _A : List[Any] = self.prepare_config_and_inputs()
        _A : Optional[Any] = config_and_inputs
        _A : str = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( _a , _a , unittest.TestCase ):
    """Common-API test suite for RegNet (model + image-classification head).

    NOTE(review): all test methods share the mangled name ``A__`` (later defs
    shadow earlier ones under unittest collection) and locals are bound to
    ``_A`` while later lines read ``model``/``signature``/``inputs_dict`` —
    restore the original names for the suite to run.
    """
    __snake_case : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    __snake_case : Optional[int] = (
        {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # RegNet has no text modality / embeddings, so these common checks are disabled
    __snake_case : Union[str, Any] = False
    __snake_case : List[str] = False
    __snake_case : Optional[Any] = False
    __snake_case : List[Any] = False
    def A__ ( self ):
        _A : Union[str, Any] = RegNetModelTester(self )
        _A : Optional[Any] = ConfigTester(self ,config_class=A__ ,has_text_modality=A__ )
    def A__ ( self ):
        # Run the standard battery of config (de)serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def A__ ( self ):
        return
    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def A__ ( self ):
        pass
    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def A__ ( self ):
        pass
    def A__ ( self ):
        # The first forward() argument of every model class must be pixel_values.
        _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A : Any = model_class(A__ )
            _A : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _A : Any = [*signature.parameters.keys()]
            _A : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,A__ )
    def A__ ( self ):
        _A : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def A__ ( self ):
        # Norm layers must initialize to weight==1 / bias==0.
        _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A : Optional[Any] = model_class(config=A__ )
            for name, module in model.named_modules():
                if isinstance(A__ ,(nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
                    self.assertTrue(
                        torch.all(module.bias == 0 ) ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
    def A__ ( self ):
        # Hidden-state outputs: count and spatial resolution per stage.
        def check_hidden_states_output(A__ ,A__ ,A__ ):
            _A : str = model_class(A__ )
            model.to(A__ )
            model.eval()
            with torch.no_grad():
                _A : Any = model(**self._prepare_for_class(A__ ,A__ ) )
            _A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _A : int = self.model_tester.num_stages
            self.assertEqual(len(A__ ) ,expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,)
        _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
        _A : Any = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                _A : Dict = layer_type
                _A : Any = True
                check_hidden_states_output(A__ ,A__ ,A__ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                _A : str = True
                check_hidden_states_output(A__ ,A__ ,A__ )
    def A__ ( self ):
        _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    @slow
    def A__ ( self ):
        # Smoke-test loading a pretrained checkpoint from the archive list.
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A : Optional[Any] = RegNetModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
def prepare_img():
    """Load the COCO cats fixture image used by the slow integration test.

    Fix: the original bound the opened image to the mangled local ``_A`` and
    then returned the undefined name ``image``; it was also defined as ``a__``
    while the integration test calls ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: pretrained RegNet classifier on the COCO cats image.

    NOTE(review): locals are mangled to ``_A`` while later lines read
    ``model``/``image_processor``/``inputs``/``outputs``, and ``.to(A__ )`` is
    presumably ``.to(torch_device)`` — restore the names before running.
    """
    @cached_property
    def A__ ( self ):
        # Image processor matching the first pretrained checkpoint (None without vision deps).
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def A__ ( self ):
        _A : Dict = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A__ )
        _A : Dict = self.default_image_processor
        _A : Optional[Any] = prepare_img()
        _A : List[str] = image_processor(images=A__ ,return_tensors='''pt''' ).to(A__ )
        # forward pass
        with torch.no_grad():
            _A : Optional[int] = model(**A__ )
        # verify the logits
        _A : List[str] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,A__ )
        _A : Optional[int] = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(A__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A__ ,atol=1E-4 ) )
| 206 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
    """Configuration class for the DETA object-detection model.

    NOTE(review): ``__init__`` repeats the parameter name ``lowerCamelCase``
    (a SyntaxError) and binds every value to the local ``_lowercase`` instead
    of ``self.<attr>`` — yet the properties and ``to_dict`` read
    ``self.encoder_attention_heads`` / ``self.d_model`` /
    ``self.backbone_config``.  The original parameter and attribute names
    must be restored before this config is usable.
    """
    lowercase_ : Any = """deta"""
    # maps the generic config attribute names onto DETA's own names
    lowercase_ : Union[str, Any] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
        """Initialize a DETA config; falls back to a default ResNet backbone
        config when none is supplied."""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            _lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(lowerCamelCase, lowerCamelCase):
                # dict form: rebuild the backbone config from its model_type
                _lowercase : Dict = backbone_config.pop('model_type')
                _lowercase : int = CONFIG_MAPPING[backbone_model_type]
                _lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
        _lowercase : Union[str, Any] = backbone_config
        _lowercase : Any = num_queries
        _lowercase : Union[str, Any] = max_position_embeddings
        _lowercase : Union[str, Any] = d_model
        _lowercase : Optional[int] = encoder_ffn_dim
        _lowercase : Optional[int] = encoder_layers
        _lowercase : Optional[Any] = encoder_attention_heads
        _lowercase : Optional[Any] = decoder_ffn_dim
        _lowercase : Dict = decoder_layers
        _lowercase : Tuple = decoder_attention_heads
        _lowercase : Union[str, Any] = dropout
        _lowercase : Optional[Any] = attention_dropout
        _lowercase : int = activation_dropout
        _lowercase : Tuple = activation_function
        _lowercase : List[Any] = init_std
        _lowercase : Union[str, Any] = init_xavier_std
        _lowercase : int = encoder_layerdrop
        _lowercase : Optional[int] = auxiliary_loss
        _lowercase : Dict = position_embedding_type
        # deformable attributes
        _lowercase : Any = num_feature_levels
        _lowercase : str = encoder_n_points
        _lowercase : Any = decoder_n_points
        _lowercase : List[str] = two_stage
        _lowercase : Dict = two_stage_num_proposals
        _lowercase : Any = with_box_refine
        _lowercase : List[Any] = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        _lowercase : List[Any] = class_cost
        _lowercase : Optional[int] = bbox_cost
        _lowercase : str = giou_cost
        # Loss coefficients
        _lowercase : Optional[int] = mask_loss_coefficient
        _lowercase : int = dice_loss_coefficient
        _lowercase : List[Any] = bbox_loss_coefficient
        _lowercase : Optional[Any] = giou_loss_coefficient
        _lowercase : str = eos_coefficient
        _lowercase : int = focal_alpha
        super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
    @property
    def UpperCamelCase ( self) -> int:
        """Number of attention heads (alias used by the common config API)."""
        return self.encoder_attention_heads
    @property
    def UpperCamelCase ( self) -> int:
        """Hidden size (alias used by the common config API)."""
        return self.d_model
    def UpperCamelCase ( self) -> Union[str, Any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        _lowercase : int = copy.deepcopy(self.__dict__)
        _lowercase : Optional[int] = self.backbone_config.to_dict()
        _lowercase : Optional[Any] = self.__class__.model_type
        return output
| 89 | 0 |
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is a bouncy number — its digits are neither
    monotonically increasing nor monotonically decreasing.

    Raises:
        ValueError: if ``n`` is not an integer.
    """
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    # Compare against the sorted digit string (ascending) and its reverse
    # (descending); bouncy means it matches neither.
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers up to
    and including it first reaches ``percent`` (Project Euler 112).

    Fix: both functions were defined under the same mangled name
    (``__snake_case``), so the internal call and the ``solution(99)`` call in
    the module's main block raised NameError; the counters were likewise bound
    to a single mangled local.

    Raises:
        ValueError: if ``percent`` is not strictly between 0 and 100.
    """
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
    # Run the module's doctests, then print the Project Euler 112 answer (99%).
    from doctest import testmod
    testmod()
    print(f'{solution(9_9)}')
| 580 |
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Apply the rectified linear unit elementwise: ``max(0, x)``.

    Fix: the function was defined under the mangled name ``UpperCamelCase_``
    while the module's main block calls ``relu`` — a NameError at runtime.

    Args:
        vector: array-like of numbers (list, tuple, or ndarray).

    Returns:
        ndarray with negative entries clamped to 0.
    """
    return np.maximum(0, vector)
if __name__ == "__main__":
    # Smoke check: clamp negatives to zero.
    print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase = logging.get_logger(__name__)
_lowercase = {"tokenizer_file": "tokenizer.json"}
_lowercase = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class lowerCamelCase__ ( _a ):
    """Fast (tokenizers-backed) tokenizer for the BLOOM models.

    NOTE(review): ``__init__`` repeats the parameter name ``__a`` (a
    SyntaxError) and several locals are bound to ``lowerCamelCase__`` while
    later lines read ``pre_tok_state`` / ``input_ids`` — the original names
    must be restored for this class to work.
    """
    __lowerCamelCase = VOCAB_FILES_NAMES
    __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase = ["""input_ids""", """attention_mask"""]
    # no slow-tokenizer counterpart exists for BLOOM
    __lowerCamelCase = None
    def __init__( self : Union[str, Any] , __a : Optional[Any]=None , __a : List[str]=None , __a : Union[str, Any]=None , __a : List[Any]="<unk>" , __a : Optional[int]="<s>" , __a : str="</s>" , __a : Tuple="<pad>" , __a : List[str]=False , __a : Optional[Any]=False , **__a : List[Any] , ):
        """Build the fast tokenizer and sync the backend pre-tokenizer's
        ``add_prefix_space`` setting with the requested value."""
        super().__init__(
            __a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , add_prefix_space=__a , clean_up_tokenization_spaces=__a , **__a , )
        lowerCamelCase__: Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , __a ) != add_prefix_space:
            # rebuild the pre-tokenizer with the corrected add_prefix_space flag
            lowerCamelCase__: Dict = getattr(__a , pre_tok_state.pop("""type""" ) )
            lowerCamelCase__: Optional[int] = add_prefix_space
            lowerCamelCase__: List[Any] = pre_tok_class(**__a )
        lowerCamelCase__: Tuple = add_prefix_space
    def lowerCamelCase_ ( self : Optional[int] , *__a : List[Any] , **__a : Dict ):
        """Batch encode; pretokenized input requires ``add_prefix_space=True``."""
        lowerCamelCase__: Dict = kwargs.get("""is_split_into_words""" , __a )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                """ pretokenized inputs.""" )
        return super()._batch_encode_plus(*__a , **__a )
    def lowerCamelCase_ ( self : int , *__a : Union[str, Any] , **__a : Any ):
        """Single encode; same ``add_prefix_space`` constraint as the batch path."""
        lowerCamelCase__: Dict = kwargs.get("""is_split_into_words""" , __a )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                """ pretokenized inputs.""" )
        return super()._encode_plus(*__a , **__a )
    def lowerCamelCase_ ( self : str , __a : Optional[int] , __a : str = None ):
        """Save the backend tokenizer model files and return their paths."""
        lowerCamelCase__: Union[str, Any] = self._tokenizer.model.save(__a , name=__a )
        return tuple(__a )
    def lowerCamelCase_ ( self : Union[str, Any] , __a : Union[str, Any] ):
        """Flatten a Conversation into model input ids, truncating from the
        left to ``model_max_length``."""
        lowerCamelCase__: Dict = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
        if len(__a ) > self.model_max_length:
            lowerCamelCase__: Optional[Any] = input_ids[-self.model_max_length :]
        return input_ids
| 306 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
    """Convert a TensorFlow T5 checkpoint to a PyTorch model directory.

    NOTE(review): all three parameters share the mangled name
    ``lowerCamelCase_`` (a SyntaxError) — presumably
    (tf_checkpoint_path, config_file, pytorch_dump_path) — and locals are
    bound to ``_lowercase`` while the prints read ``config`` and
    ``pytorch_dump_path``; restore the names before running.
    """
    # Initialise PyTorch model
    _lowercase : Optional[int] = TaConfig.from_json_file(lowerCamelCase_ )
    print(F'''Building PyTorch model from configuration: {config}''' )
    _lowercase : Union[str, Any] = TaForConditionalGeneration(lowerCamelCase_ )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
    # CLI entry point for the TF->PyTorch T5 conversion.
    # NOTE(review): the parser/args bindings use the mangled name
    # SCREAMING_SNAKE_CASE but are read back as `parser`/`args`.
    SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    SCREAMING_SNAKE_CASE : Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Print each vertex index alongside its shortest distance from ``src``.

    Fix: the original signature repeated the parameter name
    ``lowerCamelCase__`` (a SyntaxError) while the body read ``src`` and
    ``distance``, and the main block calls ``print_distance``.
    """
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one more relaxation pass would still shorten a path —
    i.e. the graph contains a negative-weight cycle reachable from the source.

    Fix: the original signature repeated the parameter name (a SyntaxError)
    and collapsed the edge unpacking into a single mangled local, so
    ``u``/``v``/``w`` were never bound.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        # A still-relaxable edge after V-1 passes proves a negative cycle.
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Single-source shortest paths via Bellman–Ford.

    Args:
        graph: edge list; each edge is a dict with keys "src", "dst", "weight".
        vertex_count: number of vertices (labels 0..vertex_count-1).
        edge_count: number of edges in ``graph``.
        src: source vertex.

    Returns:
        List of shortest distances from ``src`` (``inf`` for unreachable).

    Raises:
        Exception: if a negative-weight cycle is detected.

    Fix: the original bound the per-edge unpacking and every distance update
    to a single mangled local, so neither the source distance nor any
    relaxation was ever recorded.  The negative-cycle test is performed
    inline as one extra relaxation pass, keeping this function self-contained.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    # V-1 relaxation rounds suffice for any cycle-free shortest path.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # One extra pass: any further improvement implies a negative cycle.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
    # Interactive driver: read a graph from stdin and print shortest distances.
    # NOTE(review): each `lowerCAmelCase__ = ...` binding is mangled — later
    # lines read V/E/graph/src/dest/weight/source/shortest_distance, and the
    # generator at "Enter source, destination, weight" was clearly a 3-way
    # tuple unpack originally; restore the names before running.
    import doctest

    doctest.testmod()

    lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
    lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
    lowerCAmelCase__ = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        lowerCAmelCase__ = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        lowerCAmelCase__ = {"src": src, "dst": dest, "weight": weight}
    lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
    lowerCAmelCase__ = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 496 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number under this module's 1-based
    convention (fibonacci(1) == 0, fibonacci(2) == 1, fibonacci(3) == 1, ...).

    Non-integer input returns 0 (preserved from the original contract).

    Fix: the sequence list was bound to a mangled local while the loop
    appended to the undefined name ``sequence``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least ``n``
    digits (Project Euler 25).

    Fix: the counters were bound to mangled locals; the three functions in
    this group also all shared one mangled name, breaking the cross-calls.
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25 entry point: index of the first ``n``-digit Fibonacci
    number."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Read the target digit count from stdin and print the answer.
    print(solution(int(str(input()).strip())))
| 89 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied
    together before a single-digit number remains.

    Example: 39 -> 27 -> 14 -> 4, so the persistence is 3.

    Fixes: locals were bound to a single mangled name (``steps``/``num_string``
    /``total`` were undefined), the digit conversion read the whole number
    instead of each character, and both persistence functions shared the
    mangled name ``A__`` so the second definition shadowed the first.

    Raises:
        ValueError: for non-integers or negative values.
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(ch) for ch in num_string]
        total = 1
        for digit in numbers:
            total *= digit
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed before a
    single-digit number remains.

    Example: 199 -> 19 -> 10 -> 1, so the persistence is 3.

    Fixes: locals were bound to a single mangled name, the digit conversion
    read the whole number instead of each character, and the function shared
    the mangled name ``A__`` with its multiplicative sibling.

    Raises:
        ValueError: for non-integers or negative values.
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(ch) for ch in num_string]
        total = 0
        for digit in numbers:
            total += digit
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the conditional-DETR model package.
# NOTE(review): the conditional branches assign plain mangled names instead of
# extending `_import_structure[...]`, and the `_LazyModule` call at the bottom
# reads `_import_structure`, which is never bound under that name here — the
# original dict updates must be restored for lazy loading to work.
SCREAMING_SNAKE_CASE : Optional[Any] = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # vision extras present: expose the feature extractor / image processor
    SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
    SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch present: expose the modeling classes
    SCREAMING_SNAKE_CASE : Any = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    # At runtime the module is replaced by a lazy-loading proxy.
    import sys
    SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__lowerCamelCase : int = False
class __magic_name__ ( unittest.TestCase ):
    # Intentionally empty placeholder so the fast (non-nightly) suite still
    # collects this module; the real tests live in the @nightly class below.
    pass
@nightly
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
    """Nightly GPU tests for the Versatile Diffusion text-to-image pipeline.

    NOTE(review): locals are bound to the mangled name ``UpperCAmelCase``
    while later lines read ``pipe``/``generator``/``image``/``image_slice`` —
    restore the names before these tests can run.
    """
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
        """Saving and reloading the pipeline must reproduce the same images."""
        UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        UpperCAmelCase = 'A painting of a squirrel eating a burger '
        UpperCAmelCase = torch.manual_seed(0 )
        UpperCAmelCase = pipe(
            prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(UpperCamelCase__ )
            UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        UpperCAmelCase = generator.manual_seed(0 )
        UpperCAmelCase = pipe(
            prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
        """Full fp16 generation must match a recorded image slice."""
        UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        UpperCAmelCase = 'A painting of a squirrel eating a burger '
        UpperCAmelCase = torch.manual_seed(0 )
        UpperCAmelCase = pipe(
            prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        UpperCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCAmelCase = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 323 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the ideal Casimir relation F = (ħ c π² A) / (240 d⁴) for whichever
    of ``force``, ``area`` or ``distance`` is passed as zero.

    Exactly one argument must be 0; the computed quantity is returned in a
    single-entry dict keyed by its name.

    Fixes: the two physical constants were both bound to the same mangled name
    (so the second clobbered the first and neither was readable by the body),
    and the function's three parameters all shared one mangled name — a
    SyntaxError.

    Raises:
        ValueError: if zero or more than one argument is 0, or any is negative.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 0 |
def lowerCamelCase__(a, n, mod):
    """Compute (a ** n) % mod via recursive binary exponentiation, O(log n).

    The original definition repeated the same parameter name three times
    (a SyntaxError) while the body read `a`, `n` and `mod`; integer division
    is used for the even case so the recursion stays on ints.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (lowerCamelCase__(a, n - 1, mod) * a) % mod
    else:
        half = lowerCamelCase__(a, n // 2, mod)  # was n / 2: float division
        return (half * half) % mod


# alias under the conventional name the demo below calls
binary_exponentiation = lowerCamelCase__

# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 62 |
def UpperCamelCase_(num: int) -> int:
    """Return the multiplicative persistence of `num`: how many times its digits
    must be multiplied together before a single digit remains.

    Raises ValueError for non-int or negative input.
    """
    if not isinstance(num, int):  # was isinstance(num, num): always a TypeError
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]  # was int(num_string): converted the whole string each time

        total = 1
        for digit in numbers:
            total *= digit

        num_string = str(total)
        steps += 1
    return steps
def UpperCamelCase_(num: int) -> int:
    """Return the additive persistence of `num`: how many times its digits must
    be summed before a single digit remains.

    Raises ValueError for non-int or negative input.
    """
    if not isinstance(num, int):  # was isinstance(num, num): always a TypeError
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]  # was int(num_string): converted the whole string each time

        total = 0
        for digit in numbers:
            total += digit

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"  # root of the library source tree (read by the code below)
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"  # location of the task-guide markdown files
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
# Find the start prompt.
__lowerCamelCase : Optional[Any] = 0
while not lines[start_index].startswith(lowerCamelCase_ ):
start_index += 1
start_index += 1
__lowerCamelCase : Union[str, Any] = start_index
while not lines[end_index].startswith(lowerCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# (These module-level names were all assigned to `a` by an automated rename
# while the functions below read them under the names restored here.)
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Maps each task-guide file to the auto-model mapping that lists its supported models.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return a markdown, comma-separated list of model links supporting `task_guide`.

    The def was renamed by an automated pass while the caller below uses
    `get_model_list_for_task`; local assignments never bound the names the
    comprehension read.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the auto-generated model list in `task_guide` with the current one.

    Rewrites the file in place when `overwrite` is True; otherwise raises
    ValueError if the list is stale. (Original had duplicate parameter names
    and dropped the 4-tuple unpacking of `_find_text_in_file`.)
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                ' to fix this.')
if __name__ == "__main__":
    # `parser`/`args` were never bound under those names in the original.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 652 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def get_detr_config(model_name):
    """Build a (DetrConfig, is_panoptic) pair for the given checkpoint name.

    Restored names: the def was renamed while the caller uses
    `get_detr_config`, and locals (`backbone_config`, `config`, `is_panoptic`,
    `id2label`) were never bound under the names the body read.
    """
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """Build the (old_key, new_key) pairs mapping original DETR state-dict names
    to HuggingFace names.

    Covers the ResNet stem and stages (driven by `config.backbone_config.depths`),
    the transformer encoder/decoder layers (`config.encoder_layers`), and the
    projection/query/head parameters. The repetitive per-layer appends of the
    original are collapsed into loops that emit the exact same key strings.
    """
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight'))
    rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight'))
    rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias'))
    rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean'))
    rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var'))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dst = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            # shortcut (only the first layer of each stage has a downsample path)
            if layer_idx == 0:
                rename_keys.append((f"{src}.downsample.0.weight", f"{dst}.shortcut.convolution.weight"))
                for stat in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append((f"{src}.downsample.1.{stat}", f"{dst}.shortcut.normalization.{stat}"))
            # 3 convs, each with a batch norm
            for i in range(3):
                rename_keys.append((f"{src}.conv{i + 1}.weight", f"{dst}.layer.{i}.convolution.weight"))
                for stat in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append((f"{src}.bn{i + 1}.{stat}", f"{dst}.layer.{i}.normalization.{stat}"))
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ('input_proj.weight', 'input_projection.weight'),
            ('input_proj.bias', 'input_projection.bias'),
            ('query_embed.weight', 'query_position_embeddings.weight'),
            ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
            ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
            ('class_embed.weight', 'class_labels_classifier.weight'),
            ('class_embed.bias', 'class_labels_classifier.bias'),
            ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
            ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
            ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
            ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
            ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
            ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ]
    )
    return rename_keys
def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split the fused attention input projections into separate q/k/v entries.

    PyTorch's MultiHeadAttention stores q/k/v as one in_proj matrix + bias; the
    HF model uses separate q_proj/k_proj/v_proj (hidden size 256, so the fused
    tensors are sliced at rows 0:256, 256:512, -256:). Mutates `state_dict` in
    place. Target key names restored from the upstream transformers conversion
    script — the obfuscated original assigned every slice to a junk local.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which also includes cross-attention)
    for i in range(6):
        # self-attention input projection
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention input projection
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download and return the standard COCO cats verification image."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original torch-hub DETR checkpoint to the HF format and verify it.

    Loads the torch-hub model, renames/splits its state dict, loads it into the
    HF model, checks outputs match on a test image, then optionally saves and/or
    pushes the result. Local names restored throughout (the obfuscated original
    assigned everything to junk names while reading the originals).
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # `parser`/`args` were never bound under those names in the original.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_lowerCAmelCase : str = "scheduler_config.json"
class __magic_name__ ( _a ):
    """simple docstring"""

    # NOTE(review): all five members below are assigned to the same (name-mangled)
    # attribute, so only the last assignment (5) survives at runtime. This looks
    # like a mechanically renamed Enum whose original member names were lost —
    # restore them from the upstream source before relying on this class.
    __UpperCamelCase = 1
    __UpperCamelCase = 2
    __UpperCamelCase = 3
    __UpperCamelCase = 4
    __UpperCamelCase = 5
@dataclass
class __magic_name__ ( _a ):
    """simple docstring"""

    # NOTE(review): this dataclass is meant to carry a single output field, but
    # the attribute name and its annotation were destroyed by an automated
    # rename (a bare `= 42` is not a field declaration). Presumably
    # `prev_sample: jnp.ndarray` upstream — confirm before use.
    __UpperCamelCase = 42
class __magic_name__ :
    """
    Mixin with common scheduler functionality: config loading/saving and
    discovery of compatible scheduler classes.

    Restored per the upstream flax scheduler mixin: the original methods all
    shared one name (shadowing each other) and repeated a single parameter
    name (a SyntaxError); attribute names are recovered from the body's own
    reads (e.g. `cls._compatibles`).
    """

    config_name = SCHEDULER_CONFIG_NAME  # file name used by load_config/save_config
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler (and its state, when supported) from a saved config.

        Returns (scheduler, state) or (scheduler, state, unused_kwargs).
        """
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler config to `save_directory` (optionally pushing to the hub)."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """All scheduler classes this scheduler is compatible with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve the compatible class names against the top-level package.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Broadcast `x` to `shape`, aligning `x`'s dimensions to the left.

    Appends trailing singleton axes to `x` before broadcasting; `shape` must
    have at least as many dimensions as `x`. (Def name restored from the call
    sites below; the original repeated one parameter name — a SyntaxError.)
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=0.9_99 , _lowerCAmelCase : Any=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(_lowerCAmelCase : Tuple ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
A_ : List[Any] = []
for i in range(lowerCamelCase_ ):
A_ : Any = i / num_diffusion_timesteps
A_ : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class __magic_name__ :
    """Common beta-schedule state shared by flax schedulers.

    Field names restored from the `cls(alphas=..., betas=..., alphas_cumprod=...)`
    call in `create` (the original declared all three fields under one mangled
    name, clobbering each other). All fields are 1-D jnp arrays of length
    `num_train_timesteps`.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Derive betas/alphas/alphas_cumprod from `scheduler.config`."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Return (sqrt(alpha_prod_t), sqrt(1 - alpha_prod_t)) for the given
    timesteps, each broadcast from the left to `original_samples.shape`.
    (Name restored from the call sites below; locals were garbled.)
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse `original_samples` with `noise` at the given timesteps.
    (Name per the upstream flax scheduling utilities; the original dropped the
    tuple unpacking of `get_sqrt_alpha_prod`.)
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """Compute the v-prediction target for the given sample/noise/timesteps.
    (Name per the upstream flax scheduling utilities; the original dropped the
    tuple unpacking of `get_sqrt_alpha_prod`.)
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 454 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
    # NOTE(review): all five members below are assigned to the same attribute
    # name, so only the last assignment (5) survives at runtime. This looks like
    # a mechanically renamed Enum whose original member names were lost —
    # restore them from the upstream source before relying on this class.
    lowercase_ : Any = 1
    lowercase_ : Dict = 2
    lowercase_ : Union[str, Any] = 3
    lowercase_ : Tuple = 4
    lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
    # NOTE(review): single output field of type jnp.ndarray whose attribute name
    # was destroyed by an automated rename — presumably `prev_sample` upstream;
    # confirm before use.
    lowercase_ : jnp.ndarray
class _lowerCamelCase:
    """
    Mixin with common scheduler functionality: config loading/saving and
    discovery of compatible scheduler classes.

    Restored per the upstream flax scheduler mixin: the original methods all
    shared one name (shadowing each other) and repeated a single parameter
    name (a SyntaxError); attribute names are recovered from the body's own
    reads (e.g. `cls._compatibles`).
    """

    config_name = SCHEDULER_CONFIG_NAME  # file name used by load_config/save_config
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler (and its state, when supported) from a saved config.

        Returns (scheduler, state) or (scheduler, state, unused_kwargs).
        """
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler config to `save_directory` (optionally pushing to the hub)."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """All scheduler classes this scheduler is compatible with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve the compatible class names against the top-level package.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Broadcast `x` to `shape`, aligning `x`'s dimensions to the left.

    Appends trailing singleton axes to `x` before broadcasting; `shape` must
    have at least as many dimensions as `x`. (Def name restored from the call
    sites below; the original repeated one parameter name — a SyntaxError.)
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class _lowerCamelCase:
    """Common beta-schedule state shared by flax schedulers.

    Field names restored from the `cls(alphas=..., betas=..., alphas_cumprod=...)`
    call in `create` (the original declared all three fields under one name,
    clobbering each other). All fields are 1-D jnp arrays of length
    `num_train_timesteps`.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Derive betas/alphas/alphas_cumprod from `scheduler.config`."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def UpperCamelCase_(state, original_samples, noise, timesteps):
    """Return sqrt(alpha_cumprod) and sqrt(1 - alpha_cumprod) at `timesteps`,
    broadcast to the shape of `original_samples`.

    NOTE(review): the original signature declared all four parameters as
    `lowerCamelCase_` (a SyntaxError); names restored from the body and the
    upstream diffusers helper. `noise` is accepted for signature parity with
    the callers but is not read here.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_(state, original_samples, noise, timesteps):
    """Forward-diffuse `original_samples` with `noise` at the given `timesteps`.

    noisy = sqrt(alpha_cumprod) * x0 + sqrt(1 - alpha_cumprod) * noise
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def UpperCamelCase_(state, sample, noise, timesteps):
    """Compute the v-prediction target for `sample`/`noise` at `timesteps`.

    velocity = sqrt(alpha_cumprod) * noise - sqrt(1 - alpha_cumprod) * sample
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 89 | 0 |
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure: maps each submodule to the public names it provides.
# BUG FIX: the mangled source assigned the dict and the modeling list to the
# same throwaway name, then referenced the never-defined `_import_structure`.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

# The modeling objects require torch; only advertise them when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # Install a lazy module so the heavy torch imports only happen on first
    # attribute access (standard transformers __init__.py pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def UpperCamelCase_(nums) -> float:
    """Return the arithmetic mean of `nums`.

    Args:
        nums: Non-empty sequence of numbers.

    Raises:
        ValueError: If `nums` is empty.

    BUG FIX: the parameter was named `lowerCamelCase_` while the body read
    `nums`, so any call raised NameError.
    """
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase(unittest.TestCase):
    """Unit tests for `DisjunctiveConstraint`.

    BUG FIX: the mangled source bound every `dc.update(...)` result to `__A`
    while later lines read `stepped`/`completed`/`reset`, and all four test
    methods shared one name so only the last survived. Names restored from the
    upstream transformers test file.
    """

    def test_input_types(self):
        # token_ids must be a (nested) list of ints; tensors are rejected.
        input_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(input_ids)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch being a strict prefix of another is ambiguous.
        input_ids = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(input_ids)  # fails here

    def test_example_progression(self):
        input_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(input_ids)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        input_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(input_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 55 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_() -> None:
    """Entry point for the transformers CLI: build the argument parser,
    register every subcommand, parse argv, and dispatch to the chosen command.

    BUG FIX: the mangled body assigned results to `_lowercase` while reading
    `parser`/`args`/`service`, and the __main__ guard called an undefined
    `main`; names restored from the reads.
    """
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    UpperCamelCase_()
| 89 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config checks specific to MobileNetV1 on top of the common ConfigTester.

    Class name restored from the `MobileNetVaConfigTester(...)` call in the
    test class's setUp; base restored from the `ConfigTester` import.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''tf_padding'''))
        self.parent.assertTrue(hasattr(config, '''depth_multiplier'''))
class MobileNetVaModelTester:
    """Builds configs/inputs and runs shape checks for MobileNetV1 models.

    Class name restored from the `MobileNetVaModelTester(self)` call in setUp;
    parameter names restored from the attribute assignments (the mangled
    signature declared every parameter as `A__`, a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The classifier head width scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optional) labels and a fresh config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Check the base model's last-hidden-state shape."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Check the classification head's logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileNetV1.

    Bases restored from the `ModelTesterMixin`/`PipelineTesterMixin` imports;
    class attributes and test-method names restored from the mixin contract
    (the mangled source declared every attribute as `__snake_case` and every
    method as `A__`, so later definitions silently shadowed earlier ones).
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        # BUG FIX: results were bound to locals instead of the attributes the
        # mixin tests read.
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileNetV1 does not output attentions''')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a__():
    """Load the COCO cats fixture image used by the integration tests.

    BUG FIX: the result was bound to a placeholder name while `image` was
    returned (NameError); the bogus `-> Tuple` annotation is dropped since the
    function returns a PIL image.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained google/mobilenet_v1_1.0_224 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Property name grounded by the `self.default_image_processor` read below.
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 206 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the ONNX Stable Diffusion pipeline.

    BUG FIX: the mangled source bound every intermediate to `_lowercase` while
    later lines read `pipe`/`inputs`/`image`/..., declared all methods under one
    name, and left the base class as the undefined `_a`. Names restored from the
    reads and the upstream diffusers test file. A unique class name also stops
    this class being shadowed by the integration class below.
    """

    # Attribute name grounded by the `self.hub_checkpoint` reads below.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic minimal pipeline inputs (2 steps, numpy output)."""
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        # A batched prompt and its pre-computed embeddings must give the same image.
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = pipe.tokenizer(
            prompt,
            padding='max_length',
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors='np',
        )
        text_inputs = text_inputs['input_ids']

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        # Same check as above, including negative-prompt embeddings.
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding='max_length',
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors='np',
            )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for the ONNX Stable Diffusion pipeline.

    BUG FIX: the mangled source bound intermediates to `_lowercase` while later
    lines read `sd_pipe`/`output`/`image`/..., and reused one method name for
    every test. Names restored from the reads and the upstream diffusers tests.
    """

    @property
    def gpu_provider(self):
        # Name grounded by the `provider=self.gpu_provider` reads below.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        # Name grounded by the `sess_options=self.gpu_options` reads below.
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the default PNDM scheduler shipped with the checkpoint
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step, timestep, latents) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'Andromeda galaxy in a bottle'

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
| 89 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    """Seq2seq trainer for question answering: runs generation-based evaluation,
    then post-processes predictions before computing metrics.

    Base class restored from the `SeqaSeqTrainer` import; parameter names in
    `evaluate`/`predict` restored from the body reads (the mangled signatures
    declared duplicate parameter names, a SyntaxError).
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) examples and the hook that maps raw predictions
        # to the metric format.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """Evaluate with generation, post-process, and return prefixed metrics."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the TrainingArguments generation settings.
        gen_kwargs['max_length'] = (
            gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
        )
        gen_kwargs['num_beams'] = (
            gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric hook, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F"""{metric_key_prefix}_"""):
                    metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        """Predict with generation and return a `PredictionOutput` with metrics."""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F"""{metric_key_prefix}_"""):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the PoolFormer model: submodules are only imported
# when one of their public names is first accessed.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras present: expose the (deprecated) feature extractor and the image processor.
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    """Slow integration test comparing google/mt5-small against a reference score."""

    @slow
    def test_small_integration_test(self):
        """
        Score "Hi I am" given "Hello there" and compare the per-sequence
        log-likelihood against the value computed with the original mtf checkpoint.
        """
        # NOTE(review): ``AutoModelForSeqaSeqLM`` matches this file's import; it looks
        # like a mangled ``AutoModelForSeq2SeqLM`` — confirm against the import block.
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean NLL per token; multiply by the label length to get the
        # total sequence log-likelihood (negated to match the reference convention).
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 306 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# Repo-hygiene check: file paths must be lowercase, without spaces or hyphens,
# and live inside a directory. Exits non-zero with the number of offenders.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# A file can appear in several lists; the total is only used as a non-zero exit code.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 89 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    """Deprecated alias of :class:`Trainer`, kept only for backward compatibility."""

    def __init__(self, args=None, **kwargs):
        # FutureWarning is the warning category; the original (obfuscated) code
        # passed the ``args`` object there, which is not a valid category.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 496 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
# Session-scoped fixtures that materialize a small reference dataset in many
# on-disk formats (arrow, csv, json, jsonl, parquet, sqlite, text, archives…)
# for the IO test-suite.


@pytest.fixture(scope="session")
def dataset():
    """An in-memory 10-row ``datasets.Dataset`` with sequence/nested features."""
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    """Path of the ``dataset`` fixture materialized as an Arrow cache file."""
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    # Only produced when the optional lz4 dependency is installed.
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    # Only produced when the optional py7zr dependency is installed.
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    # Only produced when the optional zstandard dependency is installed.
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        '\\n    <?xml version="1.0" encoding="UTF-8" ?>\n    <tmx version="1.4">\n      <header segtype="sentence" srclang="ca" />\n      <body>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n        </tu>\n      </body>\n    </tmx>'
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
# Rows with a different key order than DATA.
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    import sqlite3

    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    # Same archive but with ".CSV" extensions, to exercise case-insensitive matching.
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict(
            {k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema
        )
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    # A file with an extension no builder recognizes.
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(abc_path, text_path, tmp_path_factory):
    # NOTE(review): the obfuscated source wrote the same variable twice; archiving
    # the abc and text files under ".ext" arcnames matches the fixture's intent.
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(abc_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    """A directory tree with visible and hidden (dot-prefixed) files/dirs."""
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 89 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """Unit tests for ``greedy_knapsack.calc_profit``."""

    def test_sorted(self):
        # Greedy by value density: max profit for capacity 100 is 210.
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # NOTE(review): assertRaisesRegex without a callable only returns a context
        # manager; kept as-is to preserve the upstream test's behavior.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
| 305 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for GPTBigCode: submodules load on first attribute access.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch is only imported when needed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read-only fsspec filesystem exposing a single compressed file as one entry."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol=None, target_options=None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Strip the compression extension to get the logical member name.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the single-entry listing: size/info of the compressed file
        # under the uncompressed name.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    """Read contents of a ZStandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol=None,
        target_options=None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # Thin delegating wrapper whose attributes (incl. ``close``) are writable.
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 323 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    """Builds tiny XLM configs/inputs and checks each task head's output shapes.

    Names and defaults are reconstructed from the attribute assignments in the
    original body; ``parent`` is the unittest.TestCase providing assertions.
    The class name matches the ``XLMModelTester(self)`` reference in the test
    class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create a config plus random input tensors for the task checks."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Return an XLMConfig mirroring the tester's hyper-parameters."""
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile each input along a new "choices" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite + generation + pipeline tests for the XLM model family."""

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip QA pipeline tests that are known to fail with slow tokenizers."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra label tensors XLMForQuestionAnswering expects."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Verify per-step attention tuples have the expected shapes."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Verify per-step hidden-state tuples have the expected shapes."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with the xlm-mlm-en-2048 checkpoint."""

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module-level logger used by the conversion routine below.
logger = logging.get_logger(__name__)

# Map each convertible slow-tokenizer name to its fast counterpart class.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase__(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints into fast ``tokenizer.json`` files.

    Args:
        tokenizer_name: Name of a single tokenizer class to convert, or None to
            convert every class in ``TOKENIZER_CLASSES``.
        checkpoint_name: Single checkpoint to convert, or None to convert every
            canonical checkpoint known to the tokenizer class.
        dump_path: Output directory for the generated fast tokenizer files.
        force_download: Whether to re-download checkpoints instead of using the cache.

    Raises:
        ValueError: If ``tokenizer_name`` is given but not a known tokenizer class.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known to this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast tokenizer file; remove slow-tokenizer artifacts.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


# Backward-compatible public alias: the CLI entry point below calls this name.
convert_slow_checkpoint_to_fast = lowerCamelCase__
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    # The converter is defined above as `lowerCamelCase__`.
    lowerCamelCase__(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 62 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# Module-level logger; `main` below logs through this name.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments for the data we feed the model for training and evaluation.

    Field names are reconstructed from the `self.*` reads in the validator and
    the `data_args.*` accesses in `main` below.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Require either a hub dataset name or matching local train/validation files.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer we fine-tune from.

    Field names are reconstructed from the `model_args.*` accesses in `main`
    below. `model_name_or_path` defaults to None — presumably it was required
    upstream; TODO confirm against the original script.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowercase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
elif training_args.fpaa:
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> None:
    """Entry point for `xla_spawn` (TPUs).

    The spawn API passes a process index as the single positional argument; it
    is unused here because `main()` reads its configuration from ``sys.argv``.
    The original ``-> Dict`` annotation was wrong: nothing is returned.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Standard CLI entry point; `main` is defined earlier in this script.
    main()
| 89 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class A_ ( unittest.TestCase ):
    """Smoke tests for the `accelerate launch` CLI entry point.

    NOTE(review): every class-level assignment below reuses the placeholder
    name `_UpperCAmelCase`, so only the last assignment survives, and names
    such as `mod_file`, `config_folder` and `config_file` are referenced
    without being defined. The method bodies read `self.base_cmd`,
    `self.test_file_path`, `self.config_path`, `self.changed_path` and
    `self.test_config_path`, which suggests the original attribute names were
    mangled and must be restored before this class can run.
    """
    _UpperCAmelCase : str = inspect.getfile(accelerate.test_utils )
    _UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    _UpperCAmelCase : int = ["""accelerate""", """launch"""]
    _UpperCAmelCase : Optional[Any] = Path.home() / """.cache/huggingface/accelerate"""
    _UpperCAmelCase : Dict = """default_config.yaml"""
    _UpperCAmelCase : str = config_folder / config_file
    _UpperCAmelCase : Dict = config_folder / """_default_config.yaml"""
    _UpperCAmelCase : Dict = Path('''tests/test_configs''' )
    @classmethod
    def lowerCAmelCase ( cls : Dict):
        # Stash the user's real accelerate config so tests run with defaults.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)
    @classmethod
    def lowerCAmelCase ( cls : Dict):
        # Restore the stashed user config after the test run.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def lowerCAmelCase ( self : Tuple):
        # Launch the test script, adding --multi_gpu when >1 CUDA device is visible.
        __lowerCamelCase : Dict = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy())
    def lowerCAmelCase ( self : Any):
        # Re-run the launcher once for every YAML config under tests/test_configs.
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=SCREAMING_SNAKE_CASE__):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(SCREAMING_SNAKE_CASE__), self.test_file_path] ,env=os.environ.copy())
    def lowerCAmelCase ( self : Tuple):
        # `accelerate test` runs the bundled sanity-check script end to end.
        execute_subprocess_async(['accelerate', 'test'] ,env=os.environ.copy())
class A_ ( unittest.TestCase ):
    """Tests for the `accelerate tpu-config` CLI.

    Each test runs the command with --debug (so the gcloud invocation is
    printed instead of executed) and asserts the printed command line contains
    the expected pieces.

    NOTE(review): the class attributes all reuse the placeholder name
    `_UpperCAmelCase`, so only the last assignment survives; the bodies read
    `self.tpu_name`, `self.tpu_zone`, `self.command`, `self.cmd`,
    `self.command_file`, `self.gcloud` and `self.base_output`, so the original
    attribute names appear mangled and must be restored before this can run.
    """
    _UpperCAmelCase : int = """test-tpu"""
    _UpperCAmelCase : List[str] = """us-central1-a"""
    _UpperCAmelCase : Dict = """ls"""
    _UpperCAmelCase : Union[str, Any] = ["""accelerate""", """tpu-config"""]
    _UpperCAmelCase : str = """cd /usr/share"""
    _UpperCAmelCase : int = """tests/test_samples/test_command_file.sh"""
    _UpperCAmelCase : Dict = """Running gcloud compute tpus tpu-vm ssh"""
    def lowerCAmelCase ( self : Any):
        # Base case: a single --command supplied on the CLI.
        __lowerCamelCase : int = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Optional[Any]):
        # CLI flags should override values coming from an old (0.12.0) config file.
        __lowerCamelCase : Any = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Tuple):
        # With no --command, the commands stored in latest.yaml are used.
        __lowerCamelCase : Tuple = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] ,return_stdout=SCREAMING_SNAKE_CASE__)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Optional[Any]):
        # A CLI --command replaces the commands from the config file.
        __lowerCamelCase : Tuple = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : str):
        # Multiple --command flags are chained with "; " in order.
        __lowerCamelCase : List[Any] = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Optional[Any]):
        # Commands can also come from a --command_file shell script.
        __lowerCamelCase : List[str] = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Union[str, Any]):
        # --command_file combined with an old-format config and explicit zone/name.
        __lowerCamelCase : Any = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Tuple):
        # --install_accelerate prepends a `pip install accelerate -U` step.
        __lowerCamelCase : List[str] = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" ,SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : Optional[Any]):
        # --accelerate_version pins the installed accelerate version.
        __lowerCamelCase : Dict = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] ,return_stdout=SCREAMING_SNAKE_CASE__ ,)
        self.assertIn(
            F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" ,SCREAMING_SNAKE_CASE__ ,)
| 652 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( number: int ) -> int:
    """Liouville's lambda function: return 1 if `number` has an even count of
    prime factors (counted with multiplicity), -1 otherwise.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    # Original code did `isinstance(x, x)` (always a TypeError) and referenced
    # an undefined `number`; the check must be against `int`.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    # Odd number of prime factors -> -1; even -> 1.
    return -1 if len(prime_factors(number) ) % 2 else 1
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 89 | 0 |
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ) -> list:
A_ : Dict = len(lowerCamelCase_ )
A_ : Any = [[0] * n for i in range(lowerCamelCase_ )]
for i in range(lowerCamelCase_ ):
A_ : Optional[int] = y_points[i]
for i in range(2 , lowerCamelCase_ ):
for j in range(lowerCamelCase_ , lowerCamelCase_ ):
A_ : int = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 454 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, scalar/matrix *, transpose and
    the Sherman-Morrison rank-one inverse update.

    The original block never assigned `self.row`/`self.column`/`self.array`
    (targets were mangled to throwaway `_lowercase` locals), gave three
    different methods the same name so they shadowed each other, and was
    referenced as `Matrix` by its own methods and by the `__main__` driver
    below - all restored here.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier: pad every cell to the widest element.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True when `loc` is a 2-item (row, col) index inside bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise.
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1), treating `self` as A^(-1), via the
        Sherman-Morrison formula, or None when the update is not invertible.
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate: denominator 1 + v^T A^(-1) u must be nonzero.
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backward-compatible alias for the old mangled class name.
_lowerCamelCase = Matrix
# Testing
if __name__ == "__main__":

    def testa() -> None:
        """Smoke-test the Sherman-Morrison update starting from the 3x3 identity."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def run_doctests() -> None:
        """Run the module doctests (kept for parity with the original script)."""
        import doctest

        doctest.testmod()

    # The original called `testa()` but both functions were named
    # `UpperCamelCase_` (NameError); names restored above.
    testa()
| 89 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _a ( _a ):
    """Unit tests for the REALM retriever (tokenizer + block records).

    NOTE(review): `class _a ( _a )` inherits from itself, which raises a
    NameError at import time (the intended base is presumably the imported
    `TestCase` - confirm). Every method below shares the name
    `UpperCamelCase_`, so later definitions shadow earlier ones, and the
    `SCREAMING_SNAKE_CASE` locals look like mangled assignment targets: the
    bodies later read attributes such as `self.tmpdirname`,
    `self.num_block_records` and `self.vocab_file` that are never set. The
    original names must be restored before this class can run.
    """
    def UpperCamelCase_ ( self ):
        """Set up a temp dir holding a tiny WordPiece vocab and a block-records dir."""
        SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE : Optional[Any] = 5
        # Realm tok
        SCREAMING_SNAKE_CASE : int = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname, 'realm_tokenizer' )
        os.makedirs(A, exist_ok=A )
        SCREAMING_SNAKE_CASE : Dict = os.path.join(A, VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, 'realm_block_records' )
        os.makedirs(A, exist_ok=A )
    def UpperCamelCase_ ( self ):
        """Load a RealmTokenizer from the temp vocab written in setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer' ) )
    def UpperCamelCase_ ( self ):
        """Tear down: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase_ ( self ):
        """Build a RealmConfig sized to the dummy block records."""
        SCREAMING_SNAKE_CASE : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
        return config
    def UpperCamelCase_ ( self ):
        """Return a two-row dummy QA dataset."""
        SCREAMING_SNAKE_CASE : Any = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            } )
        return dataset
    def UpperCamelCase_ ( self ):
        """Return the dummy evidence blocks as a numpy array of bytes."""
        SCREAMING_SNAKE_CASE : List[str] = np.array(
            [
                B'This is the first record',
                B'This is the second record',
                B'This is the third record',
                B'This is the fourth record',
                B'This is the fifth record',
                B'This is a longer longer longer record',
            ], dtype=A, )
        return block_records
    def UpperCamelCase_ ( self ):
        """Assemble a RealmRetriever from the dummy blocks and tokenizer."""
        SCREAMING_SNAKE_CASE : str = RealmRetriever(
            block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), )
        return retriever
    def UpperCamelCase_ ( self ):
        """Check question+block concatenation shapes and token layout for 2 retrieved blocks."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
        SCREAMING_SNAKE_CASE : Dict = self.get_dummy_retriever()
        SCREAMING_SNAKE_CASE : int = retriever.tokenizer
        SCREAMING_SNAKE_CASE : Any = np.array([0, 3], dtype='long' )
        SCREAMING_SNAKE_CASE : str = tokenizer(['Test question'] ).input_ids
        SCREAMING_SNAKE_CASE : List[str] = tokenizer(
            ['the fourth'], add_special_tokens=A, return_token_type_ids=A, return_attention_mask=A, ).input_ids
        SCREAMING_SNAKE_CASE : Tuple = config.reader_seq_len
        SCREAMING_SNAKE_CASE : Dict = retriever(
            A, A, answer_ids=A, max_length=A, return_tensors='np' )
        self.assertEqual(len(A ), 2 )
        self.assertEqual(len(A ), 2 )
        self.assertEqual(len(A ), 2 )
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'], )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'], )
    def UpperCamelCase_ ( self ):
        """Check answer-span detection (has_answers / start / end positions)."""
        SCREAMING_SNAKE_CASE : str = self.get_config()
        SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_retriever()
        SCREAMING_SNAKE_CASE : Optional[Any] = retriever.tokenizer
        SCREAMING_SNAKE_CASE : int = np.array([0, 3, 5], dtype='long' )
        SCREAMING_SNAKE_CASE : str = tokenizer(['Test question'] ).input_ids
        SCREAMING_SNAKE_CASE : Tuple = tokenizer(
            ['the fourth', 'longer longer'], add_special_tokens=A, return_token_type_ids=A, return_attention_mask=A, ).input_ids
        SCREAMING_SNAKE_CASE : Dict = config.reader_seq_len
        SCREAMING_SNAKE_CASE : Union[str, Any] = retriever(
            A, A, answer_ids=A, max_length=A, return_tensors='np' )
        self.assertEqual([False, True, True], A )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], A )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], A )
    def UpperCamelCase_ ( self ):
        """Round-trip save_pretrained / from_pretrained, locally and via a mocked hub path."""
        SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records' ) )
        # Test local path
        SCREAMING_SNAKE_CASE : List[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records' ) )
        self.assertEqual(retriever.block_records[0], B'This is the first record' )
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
            SCREAMING_SNAKE_CASE : Dict = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records' ), _REALM_BLOCK_RECORDS_FILENAME )
            SCREAMING_SNAKE_CASE : int = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
            self.assertEqual(retriever.block_records[0], B'This is the first record' )
| 28 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( lowerCamelCase_ ) -> str:
    """Format a duration in seconds as `h:mm:ss` (or `mm:ss` when under an hour).

    The original lost its local assignment targets (`t`, `h`, `m`, `s`) and
    its name: the progress-bar classes below call it as `format_time`.
    """
    t = int(lowerCamelCase_ )
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar( value , total , prefix , label , width=300 ) -> str:
    """Return the HTML for a notebook `<progress>` bar showing `value`/`total`,
    with `prefix` text before the bar and `label` text after it.

    The original had five parameters all named `lowerCamelCase_` (a
    SyntaxError); the names and the `html_progress_bar` identifier used by the
    classes below are restored.
    """
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table( items ) -> str:
    """Render `items` (list of rows; first row is the header) as an HTML table.

    Floats are formatted to 6 decimal places; everything else is `str()`-ed.
    The original never initialized `html_code` and used `isinstance(x, x)`
    (always a TypeError) instead of an `isinstance(elt, float)` check.
    """
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            # Fixed-point display for floats keeps metric columns aligned.
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class _lowerCamelCase:
    """Progress bar rendered as live HTML in a Jupyter notebook.

    NOTE(review): the `lowercase_` / `_lowercase` names below are placeholder
    assignment targets - the methods later read attributes such as
    `self.total`, `self.value`, `self.last_value`, `self.first_calls` and
    `self.wait_for` that are never written, so this block appears
    machine-mangled and is non-functional as written; the original attribute
    names must be restored.
    """
    lowercase_ : str = 5  # NOTE(review): presumably `warmup` (number of forced early refreshes) - confirm
    lowercase_ : str = 0.2  # NOTE(review): presumably `update_every` (target seconds between refreshes) - confirm
    def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
        """Store total/prefix/leave/parent/width and reset the timing state."""
        _lowercase : Optional[int] = total
        _lowercase : Optional[int] = '' if prefix is None else prefix
        _lowercase : Tuple = leave
        _lowercase : str = parent
        _lowercase : str = width
        _lowercase : List[Any] = None
        _lowercase : List[str] = None
        _lowercase : Tuple = None
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
        """Advance the bar to `value`, throttling re-renders by elapsed time."""
        _lowercase : Any = value
        if comment is not None:
            _lowercase : Union[str, Any] = comment
        if self.last_value is None:
            # First call: record start time/value and force an initial render.
            _lowercase : Dict = time.time()
            _lowercase : Tuple = value
            _lowercase : str = None
            _lowercase : Optional[int] = self.warmup
            _lowercase : Optional[Any] = 1
            self.update_bar(lowerCamelCase)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            _lowercase : List[str] = time.time()
            _lowercase : Tuple = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                _lowercase : Dict = self.elapsed_time / (value - self.start_value)
            else:
                _lowercase : int = None
            if value >= self.total:
                _lowercase : Dict = self.total
                _lowercase : List[str] = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                # ETA = average per-item time x items remaining.
                _lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
            self.update_bar(lowerCamelCase)
            _lowercase : int = value
            _lowercase : Tuple = current_time
            if self.average_time_per_item is None:
                _lowercase : str = 1
            else:
                # Skip enough items so renders happen roughly every `update_every` seconds.
                _lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
        """Rebuild the `[value/total elapsed < remaining, rate]` label and re-render."""
        _lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
        if self.elapsed_time is None:
            _lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            _lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            _lowercase : Union[str, Any] = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                F''' {format_time(self.predicted_remaining)}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
        self.display()
    def UpperCamelCase ( self) -> List[Any]:
        """Render (or re-render) the bar's HTML via IPython display."""
        _lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            _lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
        else:
            self.output.update(disp.HTML(self.html_code))
    def UpperCamelCase ( self) -> Optional[Any]:
        """Blank out the rendered widget (only for a top-level bar that was displayed)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
    """Progress bar plus an HTML metrics table and an optional child bar.

    NOTE(review): the base class `_a` is undefined here (presumably the
    notebook progress-bar class above - confirm), and the `_lowercase`
    placeholder targets hide the original attribute names (`inner_table`,
    `child_bar`, ...) that the method bodies read.
    """
    def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
        """Initialize the underlying bar and an optional metrics table with `column_names`."""
        super().__init__(lowerCamelCase)
        _lowercase : Optional[Any] = None if column_names is None else [column_names]
        _lowercase : Any = None
    def UpperCamelCase ( self) -> List[Any]:
        """Render the bar, then the metrics table, then any child bar."""
        _lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            _lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
        else:
            self.output.update(disp.HTML(self.html_code))
    def UpperCamelCase ( self, lowerCamelCase) -> Dict:
        """Append one row of `values` to the table, extending columns on the first row."""
        if self.inner_table is None:
            _lowercase : Dict = [list(values.keys()), list(values.values())]
        else:
            _lowercase : Tuple = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(lowerCamelCase)
                _lowercase : str = columns
            self.inner_table.append([values[c] for c in columns])
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
        """Create and return a child progress bar rendered beneath this tracker."""
        _lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
        return self.child_bar
    def UpperCamelCase ( self) -> Union[str, Any]:
        """Drop the child bar and re-render without it."""
        _lowercase : Optional[Any] = None
        self.display()
class _lowerCamelCase( _a ):
def __init__( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = None
_lowercase : Dict = None
_lowercase : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Tuple = 0
_lowercase : int = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss')
_lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
_lowercase : str = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
if not has_length(lowerCamelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
else:
_lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Any = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Dict = {'Training Loss': logs['loss']}
# First column is necessarily Step sine we're not in epoch eval strategy
_lowercase : List[Any] = state.global_step
self.training_tracker.write_line(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history):
if "loss" in log:
_lowercase : int = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Union[str, Any] = int(state.epoch)
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : str = 'eval'
for k in metrics:
if k.endswith('_loss'):
_lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
_lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
_lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
_lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
_lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Union[str, Any] = v
else:
_lowercase : Optional[Any] = k.split('_')
_lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
_lowercase : Tuple = v
self.training_tracker.write_line(lowerCamelCase)
self.training_tracker.remove_child()
_lowercase : str = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Optional[Any] = True
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
        """Callback firing at end of training: force a final progress-bar redraw.

        NOTE(review): repeated ``lowerCamelCase`` parameters are a SyntaxError and
        ``state`` is unbound in the body (automated-rename damage).
        """
        # Redraw the tracker with the final epoch count, forcing the update.
        self.training_tracker.update(
            state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
        _lowercase : Any = None
| 89 | 0 |
from dataclasses import dataclass


@dataclass
class UpperCAmelCase:
    """Node of a binary tree: a float payload plus optional left/right children.

    Fix: the three fields were all named ``snake_case_`` by an automated
    rename (making the class unusable); they are restored to the names the
    validator below actually reads (``data``, ``left``, ``right``).
    """

    data: float
    left: "UpperCAmelCase | None" = None
    right: "UpperCAmelCase | None" = None


# The validator below is (unfortunately) also named ``UpperCAmelCase``; its
# ``def`` rebinds the module-level name and shadows the node class, so keep a
# stable alias that both the validator and callers can use.
TreeNode = UpperCAmelCase


def UpperCAmelCase(a_) -> bool:
    """Return True if *a_* is a valid binary search tree.

    Every node must be a ``TreeNode`` whose ``data`` converts to ``float``;
    otherwise ``ValueError`` is raised. An empty tree (``None``) is valid.
    """

    def is_valid_tree(node) -> bool:
        # Structural check: every node is a TreeNode carrying numeric data.
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(a_):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(node, left_bound, right_bound) -> bool:
        # BST invariant: each node's value lies strictly between the bounds
        # inherited from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(a_, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    """Build a FocalNetConfig for the checkpoint named by the argument.

    NOTE(review): the body reads ``model_name`` while the only parameter is
    ``lowerCamelCase_``, every local is re-assigned to ``_lowercase`` while
    later expressions read the original names (``idalabel``, ``config`` ...),
    and the final call passes ``lowerCamelCase_`` for every keyword. An
    automated rename destroyed the dataflow; as written this raises NameError.
    The comments below record the apparent intent only.
    """
    # Depths and conv-embedding flags depend on the size encoded in the name.
    _lowercase : List[str] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    _lowercase : Tuple = True if 'large' in model_name or 'huge' in model_name else False
    _lowercase : Any = True if 'large' in model_name or 'huge' in model_name else False
    _lowercase : Dict = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        # Focal levels / focal windows for the large variants.
        if "fl3" in model_name:
            _lowercase : Any = [3, 3, 3, 3]
            _lowercase : Any = [5, 5, 5, 5]
        elif "fl4" in model_name:
            _lowercase : Dict = [4, 4, 4, 4]
            _lowercase : Tuple = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        _lowercase : str = [3, 3, 3, 3]
        if "lrf" in model_name:
            _lowercase : Optional[int] = [3, 3, 3, 3]
        else:
            _lowercase : Dict = [2, 2, 2, 2]
    # Embedding dimension per model size.
    if "tiny" in model_name:
        _lowercase : List[str] = 96
    elif "small" in model_name:
        _lowercase : Dict = 96
    elif "base" in model_name:
        _lowercase : Optional[int] = 128
    elif "large" in model_name:
        _lowercase : List[Any] = 192
    elif "xlarge" in model_name:
        _lowercase : Optional[Any] = 256
    elif "huge" in model_name:
        _lowercase : Dict = 352
    # set label information
    _lowercase : int = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        _lowercase : str = 'imagenet-22k-id2label.json'
    else:
        _lowercase : Tuple = 'imagenet-1k-id2label.json'
    # Load the id->label mapping from the hub dataset repo and invert it.
    _lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
    _lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
    _lowercase : Any = {v: k for k, v in idalabel.items()}
    _lowercase : Optional[Any] = FocalNetConfig(
        embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
    return config
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    """Translate an original FocalNet state-dict key into its HF Transformers name.

    Args:
        lowerCamelCase_: a parameter name from the original FocalNet checkpoint.

    Returns:
        The renamed key. Classifier-head keys become ``classifier.*``; every
        other key is prefixed with ``focalnet.``.

    Fixes: the body previously read an unbound ``name`` (the parameter was
    never assigned to it), and the ``-> Any`` annotation referenced a name not
    imported in this module.
    """
    name = lowerCamelCase_
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        # Stage layers live under the encoder in the HF layout.
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        # Everything except the classifier head sits under the base model.
        name = 'focalnet.' + name
    return name
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
    """Download an original FocalNet checkpoint, convert it to HF format,
    verify a logits slice, then optionally save and/or push to the hub.

    NOTE(review): the signature repeats ``lowerCamelCase_`` three times — a
    SyntaxError — and the body reads names that are never bound
    (``model_name``, ``state_dict``, ``val``, ``image``, ``expected_slice``,
    ``predicted_class_idx``, ``pytorch_dump_folder_path``, ``push_to_hub``).
    Automated-rename damage; this function cannot run as written.
    """
    # fmt: off
    _lowercase : Dict = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    _lowercase : Dict = model_name_to_url[model_name]
    print('Checkpoint URL: ' , lowerCamelCase_ )
    _lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
    # rename keys
    for key in state_dict.copy().keys():
        _lowercase : Dict = state_dict.pop(lowerCamelCase_ )
        _lowercase : Optional[int] = val
    _lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
    _lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase_ )
    # verify conversion
    _lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    _lowercase : Any = BitImageProcessor(
        do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
    _lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
    _lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
    # Reference torchvision pipeline used to cross-check the processor output.
    _lowercase : str = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    _lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
    _lowercase : Dict = model(**lowerCamelCase_ )
    _lowercase : int = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
    print('First values of logits:' , outputs.logits[0, :3] )
    # Known-good logits slices per model (sanity check for the conversion).
    if model_name == "focalnet-tiny":
        _lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
    elif model_name == "focalnet-tiny-lrf":
        _lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
    elif model_name == "focalnet-small":
        _lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
    elif model_name == "focalnet-small-lrf":
        _lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
    elif model_name == "focalnet-base":
        _lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
    elif model_name == "focalnet-base-lrf":
        _lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCamelCase_ )
        processor.save_pretrained(lowerCamelCase_ )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the FocalNet conversion script.
    # NOTE(review): the final call targets ``convert_focalnet_checkpoint``,
    # which is not defined in this module (the function above was renamed to
    # ``UpperCamelCase_`` by an automated rename) — this raises NameError.
    SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()


@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableDiffusionKDiffusionPipeline``.

    NOTE(review): every local is assigned to ``_A`` while later statements
    read the original names (``sd_pipe``, ``prompt``, ``output``, ``image``,
    ``image_slice``, ``expected_slice``) — automated-rename damage; these
    tests raise NameError as written. The structure still documents the
    intended checks (known-good pixel slices per scheduler).
    """

    def A__ ( self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self ):
        # SD 1.4 with the k-diffusion 'sample_euler' scheduler.
        _A : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        _A : Optional[Any] = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        sd_pipe.set_scheduler('''sample_euler''' )
        _A : str = 'A painting of a squirrel eating a burger'
        _A : int = torch.manual_seed(0 )
        _A : int = sd_pipe([prompt] ,generator=A__ ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type='''np''' )
        _A : Union[str, Any] = output.images
        _A : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _A : Dict = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def A__ ( self ):
        # SD 2.1 base with the 'sample_euler' scheduler (looser tolerance).
        _A : int = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        _A : Optional[int] = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        sd_pipe.set_scheduler('''sample_euler''' )
        _A : Any = 'A painting of a squirrel eating a burger'
        _A : Any = torch.manual_seed(0 )
        _A : Union[str, Any] = sd_pipe([prompt] ,generator=A__ ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type='''np''' )
        _A : List[str] = output.images
        _A : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _A : Tuple = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1

    def A__ ( self ):
        # SD 2.1 base with 'sample_dpmpp_2m' and Karras sigmas enabled.
        _A : Any = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        _A : List[str] = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        _A : Optional[Any] = 'A painting of a squirrel eating a burger'
        _A : Optional[Any] = torch.manual_seed(0 )
        _A : Union[str, Any] = sd_pipe(
            [prompt] ,generator=A__ ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type='''np''' ,use_karras_sigmas=A__ ,)
        _A : List[Any] = output.images
        _A : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _A : Dict = np.array(
            [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 206 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger.
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
# Model-id -> config-URL map for pretrained DETA checkpoints.
# NOTE(review): this rebinds the same name as the logger above — the two
# module constants were collapsed by an automated rename.
SCREAMING_SNAKE_CASE : Any = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
    """Configuration class for DETA models.

    NOTE(review): ``__init__`` repeats the parameter name ``lowerCamelCase``
    dozens of times — a SyntaxError — and its body reads the original
    parameter names (``backbone_config``, ``num_queries``, ...), which are
    unbound here; the ``_lowercase`` assignments also clobber one another.
    Automated-rename damage. The two class attributes below (model type and
    attribute map) were likewise collapsed onto one name.
    """
    lowercase_ : Any = """deta"""
    lowercase_ : Union[str, Any] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
        """Build a DETA configuration; defaults to a ResNet backbone config."""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            _lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            # Accept a plain dict and rebuild the proper backbone config class.
            if isinstance(lowerCamelCase, lowerCamelCase):
                _lowercase : Dict = backbone_config.pop('model_type')
                _lowercase : int = CONFIG_MAPPING[backbone_model_type]
                _lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
        _lowercase : Union[str, Any] = backbone_config
        _lowercase : Any = num_queries
        _lowercase : Union[str, Any] = max_position_embeddings
        _lowercase : Union[str, Any] = d_model
        _lowercase : Optional[int] = encoder_ffn_dim
        _lowercase : Optional[int] = encoder_layers
        _lowercase : Optional[Any] = encoder_attention_heads
        _lowercase : Optional[Any] = decoder_ffn_dim
        _lowercase : Dict = decoder_layers
        _lowercase : Tuple = decoder_attention_heads
        _lowercase : Union[str, Any] = dropout
        _lowercase : Optional[Any] = attention_dropout
        _lowercase : int = activation_dropout
        _lowercase : Tuple = activation_function
        _lowercase : List[Any] = init_std
        _lowercase : Union[str, Any] = init_xavier_std
        _lowercase : int = encoder_layerdrop
        _lowercase : Optional[int] = auxiliary_loss
        _lowercase : Dict = position_embedding_type
        # deformable attributes
        _lowercase : Any = num_feature_levels
        _lowercase : str = encoder_n_points
        _lowercase : Any = decoder_n_points
        _lowercase : List[str] = two_stage
        _lowercase : Dict = two_stage_num_proposals
        _lowercase : Any = with_box_refine
        _lowercase : List[Any] = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        _lowercase : List[Any] = class_cost
        _lowercase : Optional[int] = bbox_cost
        _lowercase : str = giou_cost
        # Loss coefficients
        _lowercase : Optional[int] = mask_loss_coefficient
        _lowercase : int = dice_loss_coefficient
        _lowercase : List[Any] = bbox_loss_coefficient
        _lowercase : Optional[Any] = giou_loss_coefficient
        _lowercase : str = eos_coefficient
        _lowercase : int = focal_alpha
        super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)

    @property
    def UpperCamelCase ( self) -> int:
        """Alias for the number of attention heads (encoder side)."""
        return self.encoder_attention_heads

    @property
    def UpperCamelCase ( self) -> int:
        """Alias for the model's hidden size (``d_model``)."""
        return self.d_model

    def UpperCamelCase ( self) -> Union[str, Any]:
        """Serialize to a dict, expanding the backbone config and model type."""
        _lowercase : int = copy.deepcopy(self.__dict__)
        _lowercase : Optional[int] = self.backbone_config.to_dict()
        _lowercase : Optional[Any] = self.__class__.model_type
        return output
| 89 | 0 |
"""simple docstring"""
import re
def __snake_case ( SCREAMING_SNAKE_CASE: Dict ):
"""simple docstring"""
_lowerCAmelCase = re.compile(
R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) )
if __name__ == "__main__":
    # Quick manual check with a known-valid number.
    # NOTE(review): the print call references ``is_sri_lankan_phone_number``
    # and ``phone``, neither of which exists here (the function above was
    # renamed to ``__snake_case`` and this constant to ``_snake_case`` by an
    # automated rename) — this raises NameError as written.
    _snake_case = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 580 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> np.ndarray:
    """Element-wise ReLU: ``max(0, x)`` over the input array (or scalar).

    Fix: the previous ``-> Optional[int]`` annotation referenced ``Optional``,
    which is not imported in this module (NameError at definition time), and
    mis-stated the return type — ``np.maximum`` returns an ndarray/scalar.
    """
    return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
    # Demo: ReLU clamps negatives to zero.
    print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
# torch is an optional dependency; import it only when available.
if is_torch_available():
    import torch
# Module logger.
_lowercase = logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ :
    """Quantization configuration (mirrors transformers' BitsAndBytesConfig).

    NOTE(review): ``self.load_in_abit`` is assigned twice in ``__init__`` from
    the same parameter — the original distinct flags (load_in_8bit /
    load_in_4bit) were collapsed by an automated rename. The body also reads
    parameter names (``load_in_abit``, ``llm_inta_threshold``, ...) that do
    not match the actual ``__a`` parameters, so it raises NameError as
    written.
    """

    def __init__( self : Optional[int] , __a : Optional[Any]=False , __a : Tuple=False , __a : Optional[int]=6.0 , __a : str=None , __a : Union[str, Any]=False , __a : Optional[int]=False , __a : int=None , __a : Any="fp4" , __a : str=False , **__a : int , ):
        """Store the quantization flags and resolve the 4-bit compute dtype."""
        lowerCamelCase__: Optional[Any] = load_in_abit
        lowerCamelCase__: List[Any] = load_in_abit
        lowerCamelCase__: Union[str, Any] = llm_inta_threshold
        lowerCamelCase__: List[Any] = llm_inta_skip_modules
        lowerCamelCase__: Any = llm_inta_enable_fpaa_cpu_offload
        lowerCamelCase__: Tuple = llm_inta_has_fpaa_weight
        lowerCamelCase__: Dict = bnb_abit_quant_type
        lowerCamelCase__: List[str] = bnb_abit_use_double_quant
        # The compute dtype may be given as None (default fp32), a string
        # attribute name on torch, or a torch.dtype instance.
        if bnb_abit_compute_dtype is None:
            lowerCamelCase__: Union[str, Any] = torch.floataa
        elif isinstance(__a , __a ):
            lowerCamelCase__: Tuple = getattr(__a , __a )
        elif isinstance(__a , torch.dtype ):
            lowerCamelCase__: int = bnb_abit_compute_dtype
        else:
            raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
        self.post_init()

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Validate every stored option's type and the bitsandbytes version."""
        if not isinstance(self.llm_inta_threshold , __a ):
            raise ValueError("""llm_int8_threshold must be a float""" )
        if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __a ):
            raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
        if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __a ):
            raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
        if not isinstance(self.llm_inta_has_fpaa_weight , __a ):
            raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
        if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
            raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
        if not isinstance(self.bnb_abit_quant_type , __a ):
            raise ValueError("""bnb_4bit_quant_type must be a string""" )
        if not isinstance(self.bnb_abit_use_double_quant , __a ):
            raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
        if self.load_in_abit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
            """0.39.0""" ):
            raise ValueError(
                """4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Return True when any quantization mode is enabled."""
        return self.load_in_abit or self.load_in_abit

    def lowerCamelCase_ ( self : Tuple ):
        """Return the active quantization method name, or None."""
        if self.load_in_abit:
            return "llm_int8"
        elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def lowerCamelCase_ ( cls : Any , __a : str , __a : Optional[int] , **__a : Any ):
        """Build a config from a dict, applying and consuming known kwargs."""
        lowerCamelCase__: Optional[int] = cls(**__a )
        lowerCamelCase__: int = []
        for key, value in kwargs.items():
            if hasattr(__a , __a ):
                setattr(__a , __a , __a )
                to_remove.append(__a )
        for key in to_remove:
            kwargs.pop(__a , __a )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def lowerCamelCase_ ( self : List[Any] , __a : Any ):
        """Write this config as pretty-printed JSON to the given path."""
        with open(__a , """w""" , encoding="""utf-8""" ) as writer:
            lowerCamelCase__: Union[str, Any] = self.to_dict()
            lowerCamelCase__: int = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n'
            writer.write(__a )

    def lowerCamelCase_ ( self : int ):
        """Serialize to a dict, stringifying the compute dtype."""
        lowerCamelCase__: Dict = copy.deepcopy(self.__dict__ )
        lowerCamelCase__: str = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
        return output

    def __repr__( self : List[str] ):
        """Class name plus the JSON form of the config."""
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def lowerCamelCase_ ( self : Tuple , __a : Optional[int] = True ):
        """Serialize to JSON; with use_diff, emit only non-default values."""
        if use_diff is True:
            lowerCamelCase__: int = self.to_diff_dict()
        else:
            lowerCamelCase__: List[str] = self.to_dict()
        return json.dumps(__a , indent=2 , sort_keys=__a ) + "\n"

    def lowerCamelCase_ ( self : List[Any] ):
        """Return only the values that differ from a default config."""
        lowerCamelCase__: Optional[Any] = self.to_dict()
        # get the default config dict
        lowerCamelCase__: Optional[Any] = BitsAndBytesConfig().to_dict()
        lowerCamelCase__: str = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                lowerCamelCase__: Dict = value
        return serializable_config_dict
| 306 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model and save it.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config file describing the T5 architecture.
        pytorch_dump_path: directory where the converted model is written.

    Fixes: the original signature repeated one parameter name three times
    (a SyntaxError), its ``-> Optional[int]`` annotation referenced an
    unimported name, and the body read unbound locals (``config``,
    ``model``, ``pytorch_dump_path``). Names restored to match the
    positional call and CLI flags in the ``__main__`` block below.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point for the T5 TF->PyTorch conversion.
    # NOTE(review): the final call references ``convert_tf_checkpoint_to_pytorch``
    # and ``args``, but the function above is named ``UpperCamelCase_`` and the
    # parsed namespace is bound to ``SCREAMING_SNAKE_CASE`` (automated-rename
    # damage) — this raises NameError as written.
    SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    SCREAMING_SNAKE_CASE : Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case__:
"""simple docstring"""
    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str=0.2 , SCREAMING_SNAKE_CASE : int=0.2 ):
        """Initialise CNN layer sizes, learning rates, and random weights.

        NOTE(review): the signature repeats ``SCREAMING_SNAKE_CASE`` for every
        parameter — a SyntaxError — and the body reads the original parameter
        names (``bp_numa``, ``conva_get``, ``size_pa``, ``rate_w``,
        ``rate_t``), which are unbound here. The ``lowercase__`` assignments
        were originally distinct attribute assignments (num_bp1/2/3, conv1,
        step_conv1, size_pooling1, w_conv1, thre_conv1, wkj, vji, thre_bp2/3).
        """
        lowercase__ : str = bp_numa
        lowercase__ : List[str] = bp_numa
        lowercase__ : List[str] = bp_numa
        lowercase__ : Union[str, Any] = conva_get[:2]
        lowercase__ : Any = conva_get[2]
        lowercase__ : Dict = size_pa
        lowercase__ : Dict = rate_w
        lowercase__ : int = rate_t
        # Random convolution kernels in [-0.5, 0.5).
        lowercase__ : Union[str, Any] = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        # Fully-connected weights in [-0.5, 0.5) and thresholds in [-1, 1).
        lowercase__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowercase__ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowercase__ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : List[str] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(SCREAMING_SNAKE_CASE , "wb" ) as f:
pickle.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f"""Model saved: {save_path}""" )
    @classmethod
    def snake_case ( cls : Any , SCREAMING_SNAKE_CASE : str ):
        """Load a pickled model dict from disk and rebuild a CNN instance.

        NOTE(review): heavily damaged by an automated rename — ``pickle.load``
        is called on the path instead of the file handle, ``conv_get`` and
        ``CNN`` are unbound (the class here is named ``snake_case__``), and
        the constructor call passes the same unbound name for every argument.
        This method raises NameError/TypeError as written.
        """
        with open(SCREAMING_SNAKE_CASE , "rb" ) as f:
            lowercase__ : Union[str, Any] = pickle.load(SCREAMING_SNAKE_CASE )  # noqa: S301
        lowercase__ : int = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ : Optional[int] = model_dic.get("size_pooling1" )
        lowercase__ : Union[str, Any] = model_dic.get("num_bp1" )
        lowercase__ : Union[str, Any] = model_dic.get("num_bp2" )
        lowercase__ : Tuple = model_dic.get("num_bp3" )
        lowercase__ : int = model_dic.get("rate_weight" )
        lowercase__ : List[str] = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ : List[Any] = CNN(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # modify model parameter
        lowercase__ : Optional[Any] = model_dic.get("w_conv1" )
        lowercase__ : int = model_dic.get("wkj" )
        lowercase__ : Dict = model_dic.get("vji" )
        lowercase__ : Optional[Any] = model_dic.get("thre_conv1" )
        lowercase__ : List[Any] = model_dic.get("thre_bp2" )
        lowercase__ : Any = model_dic.get("thre_bp3" )
        return conv_ins
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
return round(SCREAMING_SNAKE_CASE , 3 )
    def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] ):
        """Convolve the input with each kernel and return (flattened focus
        windows, list of sigmoid-activated feature maps).

        NOTE(review): the signature repeats ``SCREAMING_SNAKE_CASE`` five
        times — a SyntaxError — and the body reads the original parameter
        names (``data``, ``convs``, ``w_convs``, ``thre_convs``,
        ``conv_step``, ``size_conv``, ``size_data``, ``featuremap``...),
        which are unbound here (automated-rename damage).
        """
        lowercase__ : Tuple = convs[0]
        lowercase__ : Any = convs[1]
        lowercase__ : Optional[Any] = np.shape(SCREAMING_SNAKE_CASE )[0]
        # get the data slice of original image data, data_focus
        lowercase__ : str = []
        for i_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE ):
            for j_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE ):
                lowercase__ : Union[str, Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(SCREAMING_SNAKE_CASE )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ : List[Any] = []
        lowercase__ : str = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(SCREAMING_SNAKE_CASE ):
            lowercase__ : Any = []
            for i_focus in range(len(SCREAMING_SNAKE_CASE ) ):
                # Dot each window with the kernel, subtract its threshold.
                lowercase__ : Dict = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(SCREAMING_SNAKE_CASE ) )
            lowercase__ : Optional[Any] = np.asmatrix(SCREAMING_SNAKE_CASE ).reshape(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            data_featuremap.append(SCREAMING_SNAKE_CASE )
        # expanding the data slice to One dimenssion
        lowercase__ : str = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(SCREAMING_SNAKE_CASE ) )
        lowercase__ : Any = np.asarray(SCREAMING_SNAKE_CASE )
        return focus_list, data_featuremap
    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]="average_pool" ):
        """Down-sample each feature map by average or max pooling.

        NOTE(review): repeated ``SCREAMING_SNAKE_CASE`` parameters are a
        SyntaxError, and the body reads unbound originals (``featuremaps``,
        ``size_pooling``, ``size_map``, ``feature_map``, ``map_pooled``) —
        automated-rename damage.
        """
        lowercase__ : Tuple = len(featuremaps[0] )
        lowercase__ : List[Any] = int(size_map / size_pooling )
        lowercase__ : Optional[int] = []
        for i_map in range(len(SCREAMING_SNAKE_CASE ) ):
            lowercase__ : Union[str, Any] = featuremaps[i_map]
            lowercase__ : Union[str, Any] = []
            # Walk non-overlapping pooling windows across the map.
            for i_focus in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                for j_focus in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                    lowercase__ : List[Any] = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(SCREAMING_SNAKE_CASE ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(SCREAMING_SNAKE_CASE ) )
            lowercase__ : Tuple = np.asmatrix(SCREAMING_SNAKE_CASE ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            featuremap_pooled.append(SCREAMING_SNAKE_CASE )
        return featuremap_pooled
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[int] = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : Optional[int] = np.shape(data[i] )
lowercase__ : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__ : int = data_listed.getA().tolist()[0]
data_expanded.extend(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = np.asarray(SCREAMING_SNAKE_CASE )
return data_expanded
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = np.shape(SCREAMING_SNAKE_CASE )
lowercase__ : Any = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
        """Back-propagate pooled-layer gradients onto full-size feature maps.

        NOTE(review): repeated ``SCREAMING_SNAKE_CASE`` parameters are a
        SyntaxError, and the body reads unbound originals (``pd_pool``,
        ``out_map``, ``size_map``, ``i_pool``, ``pd_all``) —
        automated-rename damage.
        """
        lowercase__ : Optional[int] = []
        lowercase__ : Dict = 0
        for i_map in range(SCREAMING_SNAKE_CASE ):
            lowercase__ : Tuple = np.ones((size_map, size_map) )
            # Broadcast each pooled gradient back over its pooling window.
            for i in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                for j in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                    lowercase__ : Optional[int] = pd_pool[
                        i_pool
                    ]
                    lowercase__ : Dict = i_pool + 1
            # Multiply by the sigmoid derivative of the forward activation.
            lowercase__ : List[Any] = np.multiply(
                SCREAMING_SNAKE_CASE , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(SCREAMING_SNAKE_CASE )
        return pd_all
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any]=bool ):
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(SCREAMING_SNAKE_CASE )) )
print((" - - Shape: Teach_Data ", np.shape(SCREAMING_SNAKE_CASE )) )
lowercase__ : Dict = 0
lowercase__ : Tuple = []
lowercase__ : List[Any] = 10_000
while rp < n_repeat and mse >= error_accuracy:
lowercase__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(SCREAMING_SNAKE_CASE ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__ : Optional[int] = np.asmatrix(datas_train[p] )
lowercase__ : str = np.asarray(datas_teach[p] )
lowercase__ : List[Any] = self.convolute(
SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Optional[int] = self.pooling(SCREAMING_SNAKE_CASE , self.size_poolinga )
lowercase__ : List[str] = np.shape(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self._expand(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = data_bp_input
lowercase__ : Dict = np.dot(SCREAMING_SNAKE_CASE , self.vji.T ) - self.thre_bpa
lowercase__ : Dict = self.sig(SCREAMING_SNAKE_CASE )
lowercase__ : int = np.dot(SCREAMING_SNAKE_CASE , self.wkj.T ) - self.thre_bpa
lowercase__ : Tuple = self.sig(SCREAMING_SNAKE_CASE )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__ : Dict = np.multiply(
(data_teach - bp_outa) , np.multiply(SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
lowercase__ : Optional[int] = np.multiply(
np.dot(SCREAMING_SNAKE_CASE , self.wkj ) , np.multiply(SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
lowercase__ : Optional[Any] = np.dot(SCREAMING_SNAKE_CASE , self.vji )
lowercase__ : List[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__ : int = pd_conva_pooled.T.getA().tolist()
lowercase__ : Dict = self._calculate_gradient_from_pool(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__ : Tuple = self._expand_mat(pd_conva_all[k_conv] )
lowercase__ : Optional[int] = self.rate_weight * np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__ : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__ : Optional[int] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__ : Dict = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__ : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__ : Tuple = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__ : List[Any] = rp + 1
lowercase__ : Any = error_count / patterns
all_mse.append(SCREAMING_SNAKE_CASE )
def draw_error():
lowercase__ : List[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(SCREAMING_SNAKE_CASE , "+-" )
plt.plot(SCREAMING_SNAKE_CASE , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(SCREAMING_SNAKE_CASE , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Optional[int] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(SCREAMING_SNAKE_CASE )) )
for p in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : str = np.asmatrix(datas_test[p] )
lowercase__ : Dict = self.convolute(
SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Optional[Any] = self.pooling(SCREAMING_SNAKE_CASE , self.size_poolinga )
lowercase__ : Union[str, Any] = self._expand(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = data_bp_input
lowercase__ : Tuple = bp_outa * self.vji.T - self.thre_bpa
lowercase__ : Any = self.sig(SCREAMING_SNAKE_CASE )
lowercase__ : str = bp_outa * self.wkj.T - self.thre_bpa
lowercase__ : Any = self.sig(SCREAMING_SNAKE_CASE )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__ : Union[str, Any] = [list(map(self.do_round , SCREAMING_SNAKE_CASE ) ) for each in produce_out]
return np.asarray(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Dict = np.asmatrix(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.convolute(
SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Optional[Any] = self.pooling(SCREAMING_SNAKE_CASE , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
    # Intentionally empty entry point: the CNN class above is exercised elsewhere.
    pass
| 496 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if n == 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return 0
elif n == 2:
return 1
else:
_lowercase : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least ``n`` digits.

    The original recomputed the whole Fibonacci sequence for every candidate
    index (O(index^2) additions); this version advances the pair (F(i-1), F(i))
    incrementally, preserving the original's exact results — including
    returning 2 for n <= 0, since the loop never runs. Name restored to match
    the ``fibonacci_digits_index(...)`` call site in ``solution``.
    """
    digits = 0
    index = 2
    previous, current = 1, 1  # F(1), F(2)
    while digits < n:
        index += 1
        previous, current = current, previous + current  # current == F(index)
        digits = len(str(current))
    return index
def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits.

    Name restored to match the ``solution(...)`` call in the __main__ guard.
    """
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Reads the target digit count from stdin and prints the answer.
    print(solution(int(str(input()).strip())))
| 89 | 0 |
# Type aliases for 3-D points/vectors. The garbled original bound both aliases
# to the same throwaway name, leaving `Vectorad`/`Pointad` (used in the
# function annotations below) undefined.
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector(end_point_a: "Pointad", end_point_b: "Pointad") -> "Vectorad":
    """Return the vector pointing from ``end_point_a`` to ``end_point_b``.

    The original def repeated one parameter name (SyntaxError) and subtracted a
    coordinate from itself; name restored to match the call sites below.
    """
    x = end_point_b[0] - end_point_a[0]
    y = end_point_b[1] - end_point_a[1]
    z = end_point_b[2] - end_point_a[2]
    return (x, y, z)
def get_ad_vectors_cross(ab: "Vectorad", ac: "Vectorad") -> "Vectorad":
    """Return the cross product ``ab x ac``.

    Parameter names restored from the body (the original def repeated one
    name, a SyntaxError); function name matches the call site below.
    """
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: "Vectorad", accuracy: int) -> bool:
    """Return True if every component of ``vector`` rounds to 0 at ``accuracy``
    decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def A__(point_a: "Pointad", point_b: "Pointad", point_c: "Pointad", accuracy: int = 10) -> bool:
    """Return True if the three 3-D points are collinear: AB x AC is the zero
    vector (up to ``accuracy`` decimal places).

    The original def repeated one parameter name (SyntaxError); parameter
    names restored from the helper calls the body already makes.
    """
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import machinery (standard Hugging Face pattern): map each submodule to
# the public names it defines, extend the map when optional backends are
# available, and replace this module with a _LazyModule at import time.
# The garbled original bound the dict and the optional extensions to throwaway
# names (so `_import_structure` was undefined) and dropped the
# `sys.modules[__name__] = ...` assignment that makes laziness take effect.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision backend present: expose the (deprecated) feature extractor and
    # the image processor.
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch present: expose the model classes.
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the gradients of `model_a` and `model_b` are in sync when
    `did_step` is True and out of sync when it is False.

    The original def repeated one parameter name four times (SyntaxError) and
    carried an unresolvable `-> Dict` annotation; name restored to match the
    `check_model_parameters(...)` call sites below.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model(model, input, target, accelerator, do_backward=True):
    """Forward + MSE loss + backward for one batch.

    When `do_backward` is False the loss is manually scaled by the
    accumulation steps and `loss.backward()` is used; otherwise
    `accelerator.backward` handles scaling. The original def repeated one
    parameter name (SyntaxError); name restored to match call sites below.
    NOTE(review): the garbled source shows the default as True — confirm, as
    upstream variants of this helper default it to False.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Build a reference model, its DDP copy, and a dataloader, prepared by
    `accelerator`; with `sched`, also build optimizers and LR schedulers.

    Restored from the garbled original (duplicate parameter names, lost
    tuple unpacks); name matches the `get_training_setup(...)` call sites.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched_lr = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched_lr, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single device, `no_sync` must be a no-op: grads always in sync."""
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """On a distributed setup, `no_sync` must keep grads out of sync until a
    synchronized step is taken."""
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` syncs grads only on accumulation
    boundaries (every 2nd batch, or the final batch)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check optimizer steps and LR schedules stay aligned between the manual
    baseline and the `accelerator.accumulate`-wrapped DDP copy."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors the prepared scheduler.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Check `GradientState.active_dataloader` tracks the innermost prepared
    dataloader, including when a second loop nests inside the first."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Dispatch the gradient-sync/accumulation checks appropriate for the
    current distributed configuration. Name restored to match the calls in
    `_mp_fn` and the __main__ guard."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # NOTE(review): renamed from a garbled placeholder; TPU launchers look up
    # the `_mp_fn` entry point by name — confirm against the launcher used.
    main()
if __name__ == "__main__":
    # Script entry point: run the sync/accumulation checks directly.
    main()
| 323 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# (The garbled original bound both constants to one throwaway name, leaving
# the names the solver below reads undefined.)
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
def UpperCamelCase_(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir force equation for whichever of force/area/distance is
    passed as 0; exactly one argument must be 0.

    Returns a one-entry dict naming the solved quantity. The original def
    repeated one parameter name three times (SyntaxError); names restored from
    the body.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
# Run doctest
if __name__ == "__main__":
    # Executes any doctests defined in this module's docstrings.
    import doctest

    doctest.testmod()
| 89 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: the google/mt5-small loss on a tiny prompt must
    match a stored reference score."""

    @slow
    def _A(self):
        # NOTE(review): locals restored — the garbled original assigned every
        # result to one throwaway name but read `model`/`tokenizer`, raising
        # NameError on first use.
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.22_8168
        # Scores must agree with the reference to ~2e-4.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
| 62 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied together
    before a single digit remains (e.g. 217 -> 14 -> 4 gives 2).

    Fixes in the garbled original: ``isinstance(num, num)`` (TypeError),
    digit list built as ``[int(num) ...]`` instead of per-digit, and the loop
    variable re-stringifying ``num`` instead of the running product. Name
    restored to match the function's own error messages.
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed before a
    single digit remains (e.g. 199 -> 19 -> 10 -> 1 gives 3).

    Same defects fixed as in ``multiplicative_persistence``: the isinstance
    check compared ``num`` to itself, the digit list repeated ``int(num)``,
    and the running sum was never re-stringified.
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    # Executes any doctests defined in this module's docstrings.
    import doctest

    doctest.testmod()
| 89 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs shared by the LayoutLMv3 tests.

    The original ``__init__`` repeated one parameter name nine times (a
    SyntaxError); parameter names restored from the attribute assignments.
    Class name restored to match the ``LayoutLMvaImageProcessingTester(self)``
    call in the test suite's ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=1_8,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size when none is given.
        size = size if size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        # Kwargs forwarded to the LayoutLMv3 image processor under test.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A_ ( _a , unittest.TestCase ):
_UpperCAmelCase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = LayoutLMvaImageProcessingTester(self)
@property
def lowerCAmelCase ( self : Dict):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'apply_ocr'))
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size ,{'height': 1_8, 'width': 1_8})
__lowerCamelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2)
self.assertEqual(image_processor.size ,{'height': 4_2, 'width': 4_2})
def lowerCAmelCase ( self : Tuple):
pass
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
# Test not batched input
__lowerCamelCase : str = image_processing(image_inputs[0] ,return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,SCREAMING_SNAKE_CASE__)
self.assertIsInstance(encoding.boxes ,SCREAMING_SNAKE_CASE__)
# Test batched
__lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
# Test not batched input
__lowerCamelCase : int = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
__lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
# Test not batched input
__lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
__lowerCamelCase : Tuple = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
    def lowerCAmelCase ( self : str):
        """Integration test: run the processor on a real DocVQA scan and compare pixel
        values plus the Tesseract-derived OCR words/boxes, then re-run with OCR disabled.

        NOTE(review): assignments bind ``__lowerCamelCase`` while later lines read
        ``image_processing`` / ``ds`` / ``encoding`` / ``SCREAMING_SNAKE_CASE__`` — mangled
        local names left by an automated rewrite; restore them before running.
        """
        __lowerCamelCase : int = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        __lowerCamelCase : Union[str, Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test')
        __lowerCamelCase : Union[str, Any] = Image.open(ds[0]['file']).convert('RGB')
        __lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # pixel values are resized to 224x224; with OCR enabled, words and boxes are returned too
        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words) ,len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        __lowerCamelCase : Union[str, Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        __lowerCamelCase : str = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8,
        2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1,
        5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words ,SCREAMING_SNAKE_CASE__)
        self.assertListEqual(encoding.boxes ,SCREAMING_SNAKE_CASE__)
        # with apply_OCR = False: only pixel values are produced
        __lowerCamelCase : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4))
| 652 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
# emit INFO-level progress messages during conversion
logging.set_verbosity_info()
# module-level logger for this conversion script
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( model_name ):
    """Build a ``DetrConfig`` for the given checkpoint name.

    Args:
        model_name: checkpoint identifier; must contain "resnet-50" or "resnet-101",
            and may contain "panoptic" for the segmentation variant.

    Returns:
        Tuple ``(config, is_panoptic)``.

    Raises:
        ValueError: if the name matches neither supported backbone.

    NOTE: the original signature used a mangled parameter name while the body read
    ``model_name``, and the label attributes were computed into discarded locals;
    this restores the intended assignments onto ``config``.
    """
    # initialize the backbone config from the matching pretrained ResNet
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet50 or resnet101' )

    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )

    # set label attributes
    is_panoptic = 'panoptic' in model_name
    config.num_labels = 250 if is_panoptic else 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    # JSON keys come back as strings; the config expects integer class ids
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def UpperCamelCase_( config ):
    """Return the ordered list of ``(original_key, hf_key)`` rename pairs for a DETR checkpoint.

    Covers the ResNet backbone (stem + stages), the transformer encoder/decoder
    layers, and the projection / query-embedding / head weights.  Pair order matches
    the original hand-written list exactly.

    NOTE: the original signature used a mangled parameter name while the body read
    ``config`` — fixed to the name the body actually uses.
    """
    bn_suffixes = ('weight', 'bias', 'running_mean', 'running_var')
    rename_keys = []
    # stem: first conv + its batch norm
    rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
    for suffix in bn_suffixes:
        rename_keys.append(
            (f'backbone.0.body.bn1.{suffix}', f'backbone.conv_encoder.model.embedder.embedder.normalization.{suffix}') )
    # stages
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            src = f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}'
            dst = f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}'
            # shortcut (downsample) exists only on the first layer of each stage
            if layer_idx == 0:
                rename_keys.append((f'{src}.downsample.0.weight', f'{dst}.shortcut.convolution.weight') )
                for suffix in bn_suffixes:
                    rename_keys.append((f'{src}.downsample.1.{suffix}', f'{dst}.shortcut.normalization.{suffix}') )
            # 3 convs per layer, each followed by a batch norm
            for i in range(3 ):
                rename_keys.append((f'{src}.conv{i + 1}.weight', f'{dst}.layer.{i}.convolution.weight') )
                for suffix in bn_suffixes:
                    rename_keys.append((f'{src}.bn{i + 1}.{suffix}', f'{dst}.layer.{i}.normalization.{suffix}') )
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    encoder_parts = (
        ('self_attn.out_proj', 'self_attn.out_proj'),
        ('linear1', 'fc1'),
        ('linear2', 'fc2'),
        ('norm1', 'self_attn_layer_norm'),
        ('norm2', 'final_layer_norm'),
    )
    # decoder layers: 2 output projections, 2 feedforward neural networks and 3 layernorms
    decoder_parts = (
        ('self_attn.out_proj', 'self_attn.out_proj'),
        ('multihead_attn.out_proj', 'encoder_attn.out_proj'),
        ('linear1', 'fc1'),
        ('linear2', 'fc2'),
        ('norm1', 'self_attn_layer_norm'),
        ('norm2', 'encoder_attn_layer_norm'),
        ('norm3', 'final_layer_norm'),
    )
    for i in range(config.encoder_layers ):
        for old, new in encoder_parts:
            for suffix in ('weight', 'bias'):
                rename_keys.append(
                    (f'transformer.encoder.layers.{i}.{old}.{suffix}', f'encoder.layers.{i}.{new}.{suffix}') )
        for old, new in decoder_parts:
            for suffix in ('weight', 'bias'):
                rename_keys.append(
                    (f'transformer.decoder.layers.{i}.{old}.{suffix}', f'decoder.layers.{i}.{new}.{suffix}') )
    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ('input_proj.weight', 'input_projection.weight'),
            ('input_proj.bias', 'input_projection.bias'),
            ('query_embed.weight', 'query_position_embeddings.weight'),
            ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
            ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
            ('class_embed.weight', 'class_labels_classifier.weight'),
            ('class_embed.bias', 'class_labels_classifier.bias'),
            ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
            ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
            ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
            ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
            ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
            ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ] )
    return rename_keys
def UpperCamelCase_( state_dict , old , new ):
    """Rename one key of ``state_dict`` in place: pop ``old`` and reinsert its value under ``new``.

    NOTE: the original signature declared all three parameters under one mangled
    name (a SyntaxError); callers pass ``(state_dict, src, dest)`` positionally.
    """
    val = state_dict.pop(old )
    state_dict[new] = val
def UpperCamelCase_( state_dict , is_panoptic=False ):
    """Split fused attention input projections of a DETR checkpoint into separate q/k/v entries.

    PyTorch's ``nn.MultiheadAttention`` stores query/key/value as one fused
    ``in_proj_weight`` (3*256 rows) and ``in_proj_bias``; the HF model expects separate
    ``q_proj``/``k_proj``/``v_proj`` tensors of 256 rows each.  Entries are popped from
    ``state_dict`` and written back under the HF key names (the "model." /
    "detr.model." prefix is prepended later by the caller).

    NOTE: the original declared both parameters under one mangled name and assigned
    the computed slices to discarded locals; this restores the intended in-place writes.
    """
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def UpperCamelCase_( ):
    """Download and return the standard COCO cats test image used for verification.

    NOTE: the original passed an undefined mangled name to ``requests.get`` instead
    of the local URL and stream flag; this restores the intended call.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True lets PIL read straight from the HTTP response body
    return Image.open(requests.get(url , stream=True ).raw )
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ) -> List[Any]:
    """Convert an original facebookresearch/detr checkpoint to the HF format, verify
    its outputs against the original model, and optionally save/push the result.

    NOTE(review): the signature declares three parameters under one mangled name
    (a SyntaxError in Python) while the body reads ``model_name``,
    ``pytorch_dump_folder_path`` and ``push_to_hub``; several call arguments below
    also pass the mangled name where booleans/locals were intended.  The parameter
    names must be restored before this script can run.
    """
    _lowercase , _lowercase : int = get_detr_config(lowerCamelCase_ )
    # load original model from torch hub
    _lowercase : int = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(F'''Converting model {model_name}...''' )
    _lowercase : Optional[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=lowerCamelCase_ ).eval()
    _lowercase : str = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(lowerCamelCase_ ):
        if is_panoptic:
            _lowercase : str = 'detr.' + src
        rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _lowercase : List[Any] = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            # base-model keys get the "detr.model." prefix; classifier/bbox heads get "detr.";
            # bbox_attention / mask_head keys are kept as-is
            if (
                key.startswith('detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                _lowercase : Tuple = state_dict.pop(lowerCamelCase_ )
                _lowercase : int = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                _lowercase : Dict = state_dict.pop(lowerCamelCase_ )
                _lowercase : Optional[Any] = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                _lowercase : Optional[Any] = state_dict.pop(lowerCamelCase_ )
                _lowercase : Union[str, Any] = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                _lowercase : Dict = state_dict.pop(lowerCamelCase_ )
                _lowercase : List[str] = val
    # finally, create HuggingFace model and load state dict
    _lowercase : Optional[Any] = DetrForSegmentation(lowerCamelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCamelCase_ )
    model.load_state_dict(lowerCamelCase_ )
    model.eval()
    # verify our conversion on an image
    _lowercase : str = 'coco_panoptic' if is_panoptic else 'coco_detection'
    _lowercase : Optional[int] = DetrImageProcessor(format=lowerCamelCase_ )
    _lowercase : str = processor(images=prepare_img() , return_tensors='pt' )
    _lowercase : Tuple = encoding['pixel_values']
    # run both the original torch-hub model and the converted HF model on the same image
    _lowercase : int = detr(lowerCamelCase_ )
    _lowercase : Tuple = model(lowerCamelCase_ )
    assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
        model.save_pretrained(lowerCamelCase_ )
        processor.save_pretrained(lowerCamelCase_ )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...' )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the DETR checkpoint conversion.
    # NOTE(review): the parser is assigned to a mangled name while `parser` /
    # `args` are read below — restore the local names before running.
    SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( name ):
    """Map one key of an original ViT-MAE checkpoint to its HF Transformers name.

    Applies a fixed sequence of substring rewrites; the order matters (e.g.
    "decoder_pos_embed" must be handled before the generic "pos_embed" check).

    NOTE: the original signature used a mangled parameter name while the body read
    ``name`` — fixed to the name the body actually uses.
    """
    if "cls_token" in name:
        name = name.replace("cls_token" , "vit.embeddings.cls_token" )
    if "mask_token" in name:
        name = name.replace("mask_token" , "decoder.mask_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "vit.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight" , "vit.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias" , "vit.layernorm.bias" )
    return name
def __snake_case ( orig_state_dict , config ):
    """Rewrite fused ``qkv`` entries of a ViT-MAE state dict into separate query/key/value tensors.

    Keys containing "qkv" are split along the first axis into three ``dim``-sized
    chunks (query, key, value — in that order) and reinserted under the HF naming
    scheme; ``dim`` and the target prefix depend on whether the entry belongs to the
    decoder or the encoder.

    NOTE: the original declared both parameters under one mangled name and assigned
    the computed slices to discarded locals; this restores the intended writes.
    NOTE(review): the upstream conversion script renames non-qkv keys via the
    key-renaming helper; in this file that helper's name clashes with this function,
    so non-qkv entries are kept under their original key here — confirm.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ) -> str:
    """Convert an original ViT-MAE checkpoint (by URL) to the HF format, verify the
    logits against hard-coded reference values, and save model + image processor.

    NOTE(review): both parameters share one mangled name (a SyntaxError) while the
    body reads ``checkpoint_url`` / ``pytorch_dump_folder_path``; the config-size
    assignments below also bind throwaway ``A_`` locals instead of setting config
    attributes (hidden_size, intermediate_size, num layers/heads, patch_size) —
    restore them before this script can run.
    """
    A_ : str = ViTMAEConfig()
    # "large" / "huge" variants enlarge the encoder; base sizes are the config defaults
    if "large" in checkpoint_url:
        A_ : Dict = 1024
        A_ : List[str] = 4096
        A_ : str = 24
        A_ : Optional[Any] = 16
    elif "huge" in checkpoint_url:
        A_ : int = 14
        A_ : Union[str, Any] = 1280
        A_ : Union[str, Any] = 5120
        A_ : Tuple = 32
        A_ : Optional[Any] = 16
    A_ : Union[str, Any] = ViTMAEForPreTraining(lowerCamelCase_ )
    A_ : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="cpu" )['model']
    A_ : Union[str, Any] = ViTMAEImageProcessor(size=config.image_size )
    A_ : Optional[Any] = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ )
    model.load_state_dict(lowerCamelCase_ )
    model.eval()
    A_ : Optional[Any] = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    A_ : Dict = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
    A_ : List[str] = ViTMAEImageProcessor(size=config.image_size )
    A_ : str = image_processor(images=lowerCamelCase_ , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    A_ : str = model(**lowerCamelCase_ )
    A_ : Optional[int] = outputs.logits
    # reference logit slices per checkpoint size (fixed seed above makes masking deterministic)
    if "large" in checkpoint_url:
        A_ : Union[str, Any] = torch.tensor(
            [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
    elif "huge" in checkpoint_url:
        A_ : Any = torch.tensor(
            [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
    else:
        A_ : Optional[int] = torch.tensor(
            [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(lowerCamelCase_ )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL and output folder, then convert.
    # NOTE(review): the parser and parsed args are assigned to mangled names while
    # `parser` / `args` are read below — restore the local names before running.
    _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
        type=str,
        help='''URL of the checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    _lowerCAmelCase : Dict = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 454 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# file name under which scheduler configurations are serialized
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
    """Enumeration of five scheduler-related constants (values 1-5).

    NOTE(review): the base class ``_a`` is defined outside this chunk (presumably an
    ``Enum``), and all five members share the mangled name ``lowercase_`` — with plain
    class attributes only the last assignment would survive; restore distinct member
    names before use.
    """
    lowercase_ : Any = 1
    lowercase_ : Dict = 2
    lowercase_ : Union[str, Any] = 3
    lowercase_ : Tuple = 4
    lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
    """Output dataclass holding a single JAX array (base class ``_a`` defined elsewhere)."""
    # the wrapped array; its meaning depends on the producing scheduler
    lowercase_ : jnp.ndarray
class _lowerCamelCase:
    """Scheduler mixin: config-backed load/save plus compatible-scheduler discovery.

    NOTE(review): the method signatures below repeat the mangled parameter name
    ``lowerCamelCase`` (duplicate parameters are a SyntaxError) and the tuple unpacks
    bind both results to ``_lowercase`` — the original parameter/local names must be
    restored before this class can be used.
    """
    # class-level config metadata consumed by the (external) config loading machinery
    lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
    lowercase_ : str = ["""dtype"""]
    lowercase_ : Dict = []
    lowercase_ : int = True
    @classmethod
    def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
        """Load scheduler config, instantiate the scheduler, and create its state if supported."""
        _lowercase , _lowercase : Optional[int] = cls.load_config(
            pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
        _lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
        # schedulers that carry mutable state expose create_state() plus a has_state flag
        if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
            _lowercase : List[Any] = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
        """Serialize the scheduler configuration to a directory (delegates to save_config)."""
        self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
    @property
    def UpperCamelCase ( self) -> Union[str, Any]:
        """Return the list of scheduler classes compatible with this one."""
        return self._get_compatibles()
    @classmethod
    def UpperCamelCase ( cls) -> Any:
        """Resolve the `_compatibles` class names (plus this class) to classes on the root package."""
        _lowercase : Any = list(set([cls.__name__] + cls._compatibles))
        _lowercase : Dict = importlib.import_module(__name__.split('.')[0])
        # silently skip names the root package does not export
        _lowercase : Any = [
            getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
        ]
        return compatible_classes
def UpperCamelCase_( x , shape ) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape``, aligning x's axes to the LEFT of the target shape.

    The array is first right-padded with singleton axes (e.g. (a, b) -> (a, b, 1, 1)
    for a 4-d target) and then broadcast — the opposite of numpy's default
    right-aligned broadcasting.

    NOTE: the original declared both parameters under one mangled name while the
    body read ``x`` — fixed to distinct names.
    """
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def UpperCamelCase_( num_diffusion_timesteps , max_beta=0.9_99 , dtype=jnp.float32 ) -> jnp.ndarray:
    """Create the "squaredcos_cap_v2" (Glide cosine) beta schedule.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: upper clamp applied to every beta (avoids singularities near t=1).
        dtype: dtype of the returned JAX array.

    Returns:
        1-d ``jnp.ndarray`` of length ``num_diffusion_timesteps``.

    NOTE: the original declared all parameters under one mangled name and defaulted
    the dtype to the nonexistent ``jnp.floataa``; restored to distinct names and
    ``jnp.float32``.
    """
    def alpha_bar(time_step ):
        # cumulative noise level at normalized time `time_step` in [0, 1]
        return math.cos((time_step + 0.0_08 ) / 1.0_08 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1 ) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class _lowerCamelCase:
    """Immutable container of the common diffusion quantities: alphas, betas and
    their cumulative product, built from a scheduler's config.

    NOTE(review): the three fields share the mangled name ``lowercase_`` — they are
    presumably ``alphas`` / ``betas`` / ``alphas_cumprod`` given the ``create`` keyword
    arguments below; restore distinct field names before use.
    """
    lowercase_ : jnp.ndarray
    lowercase_ : jnp.ndarray
    lowercase_ : jnp.ndarray
    @classmethod
    def UpperCamelCase ( cls, lowerCamelCase) -> str:
        """Derive the beta schedule from ``scheduler.config`` and return a new state instance."""
        _lowercase : int = scheduler.config
        if config.trained_betas is not None:
            # a pretrained/custom beta array takes precedence over any schedule formula
            _lowercase : str = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            _lowercase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            _lowercase : Dict = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            # NOTE(review): `betas_for_alpha_bar` is not defined under that name in this
            # file (the helper exists under a mangled name) — confirm the reference.
            _lowercase : Optional[int] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
        _lowercase : List[str] = 1.0 - betas
        _lowercase : Union[str, Any] = jnp.cumprod(lowerCamelCase, axis=0)
        return cls(
            alphas=lowerCamelCase, betas=lowerCamelCase, alphas_cumprod=lowerCamelCase, )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_cumprod[t]) and sqrt(1 - alpha_cumprod[t]) for `timesteps`.

    Both results are flattened and left-broadcast to `original_samples.shape`
    so they can multiply sample tensors elementwise. Renamed from the
    obfuscated ``UpperCamelCase_`` to match its call sites
    (``get_sqrt_alpha_prod(...)`` in the two helpers below); parameter names
    restored from the body's reads (`state.alphas_cumprod`, `timesteps`).

    Returns:
        Tuple (sqrt_alpha_prod, sqrt_one_minus_alpha_prod) of jnp arrays.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_(state, original_samples, noise, timesteps):
    """Forward-diffuse `original_samples` to the given `timesteps`.

    noisy = sqrt(a_bar_t) * x0 + sqrt(1 - a_bar_t) * noise.
    Original signature repeated one (duplicate) parameter name; names
    restored from the body's reads.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def UpperCamelCase_(state, sample, noise, timesteps):
    """Compute the v-prediction target for `sample` at the given `timesteps`.

    velocity = sqrt(a_bar_t) * noise - sqrt(1 - a_bar_t) * sample.
    Original signature repeated one (duplicate) parameter name; names
    restored from the body's reads (it uses `sample`).
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 89 | 0 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCamelCase_ = False
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = "ybelkada/fonts"
def _check_torch_version():
    """Raise ImportError if installed torch is older than the required 1.11.

    Renamed from the obfuscated ``lowercase__`` to match the call sites
    (``_check_torch_version()`` in ``torch_extract_patches`` and
    ``extract_flattened_patches``).
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Split a CHW image tensor into non-overlapping patches.

    Uses ``unfold`` with stride == patch size, then reorders so the result is
    shaped [1, rows, columns, patch_height * patch_width * channels].
    Renamed from the obfuscated ``lowercase__`` to match the call site in
    ``extract_flattened_patches``; the original signature repeated a single
    (duplicate) parameter name, restored from the body's reads.
    """
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None):
    """Render `text` onto a new PIL image, wrapped at 80 characters.

    Font resolution order: raw `font_bytes`, then `font_path`, then the
    default Arial TTF downloaded from the Hub fonts repo. Padding values are
    in pixels around the rendered text block.

    Renamed from the obfuscated ``lowercase__`` to match the call site in
    ``render_header``; defaults restored from the garbled signature
    (36 / "black" / "white" / 5 / 5 / 5 / 5 / None / None).
    """
    requires_backends(render_text, 'vision')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '\n'.join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # UpperCamelCase_ is the module-level fonts-repo id ("ybelkada/fonts").
        font = hf_hub_download(UpperCamelCase_, 'Arial.TTF')
    font = ImageFont.truetype(font, encoding='UTF-8', size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` text above `image` and return the combined numpy image.

    Both the header strip and the original image are resized to the wider of
    the two widths, stacked vertically on a white canvas, and converted back
    to a numpy array. Extra kwargs are forwarded to ``render_text``.
    Renamed from the obfuscated ``lowercase__`` to match the call site in
    ``_a.preprocess``.
    """
    requires_backends(render_header, 'vision')

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class _a ( BaseImageProcessor ):
    """Pix2Struct image processor.

    Optionally renders a text header onto each image (VQA mode), normalizes,
    and extracts flattened patches with 1-based row/column position ids,
    padded or truncated to ``max_patches``.

    Restored from obfuscation: the class declared itself as its own base
    (``class _a(_a)``, a NameError) — the base is the imported
    ``BaseImageProcessor``; every method signature repeated a single
    parameter name ``A`` (SyntaxError) — names restored from the bodies'
    reads; the three methods all shared one name while call sites use
    ``self.extract_flattened_patches`` and ``self.normalize``; and the
    nonexistent ``np.uinta`` / ``floataa`` dtypes were fixed.
    """

    model_input_names = ["""flattened_patches"""]

    def __init__( self, do_convert_rgb = True, do_normalize = True, patch_size = None, max_patches = 2_048, is_vqa = False, **kwargs, ):
        """Store processing defaults; `patch_size` defaults to 16x16."""
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches( self, image, max_patches, patch_size, **kwargs ):
        """Resize `image` so ~`max_patches` patches fit, then flatten patches.

        Returns a numpy array of shape
        [max_patches, 2 + patch_height * patch_width * channels], where the
        first two columns are 1-based row/col ids (0 marks padding rows).
        """
        requires_backends(self.extract_flattened_patches, 'torch')
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image fits max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=False, antialias=True, ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize( self, image, data_format = None, **kwargs ):
        """Zero-mean / unit-std normalize using the image's own statistics.

        The stddev is floored at 1/sqrt(num_pixels) to avoid division blow-up
        on near-constant images (tf.image.per_image_standardization style).
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess( self, images, header_text = None, do_convert_rgb = None, do_normalize = None, max_patches = None, patch_size = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ):
        """Full preprocessing pipeline; returns a BatchFeature.

        Produces 'flattened_patches' plus an 'attention_mask' marking
        non-padding patch rows. In VQA mode a `header_text` must be given
        and is rendered above each image.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are ')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.')
            font_bytes = kwargs.pop('font_bytes', None)
            font_path = kwargs.pop('font_path', None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=return_tensors)

        return encoded_outputs
| 28 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> float:
    """Return the arithmetic mean of the given sequence of numbers.

    Raises:
        ValueError: if the sequence is empty.
    """
    if not lowerCamelCase_:
        raise ValueError('List is empty')
    # Accumulate explicitly instead of using sum(); result is identical.
    total = 0
    for value in lowerCamelCase_:
        total += value
    return total / len(lowerCamelCase_)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase ( _a ):
    """Configuration class for MobileNetV2 models.

    Restored from obfuscation: the ``__init__`` declared all fifteen
    parameters as ``A`` (SyntaxError) while the body read the real names
    (`num_channels`, `image_size`, ...), and assigned them to a module-style
    ``__A`` instead of instance attributes — both fixed, with defaults taken
    from the garbled signature (3, 224, 1.0, 8, 8, 6, 32, True, True,
    "relu6", True, 0.8, 0.02, 0.001, 255).
    """

    # Identifier used by AutoConfig; HF configs expose this as `model_type`.
    model_type = """mobilenet_v2"""

    def __init__( self, num_channels=3, image_size=2_24, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.0_01, semantic_loss_ignore_index=2_55, **kwargs ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase ( _a ):
    """ONNX export configuration for MobileNetV2.

    Restored from obfuscation: the three properties all shared one name
    (shadowing each other so only the last survived); renamed to the
    standard transformers ``OnnxConfig`` API (`inputs`, `outputs`,
    `atol_for_validation`), and the class-level version pin to
    ``torch_onnx_minimum_version``.
    """

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        """ONNX input spec: a batched `pixel_values` tensor."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs( self ):
        """ONNX output spec, depending on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation( self ):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 55 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    """Entry point for the `transformers-cli` tool.

    Builds the argument parser, registers every sub-command, dispatches to
    the selected command's `run()`, and prints help / exits 1 when no
    sub-command was given. Renamed from the obfuscated ``UpperCamelCase_``
    to match the ``main()`` call in the ``__main__`` guard below (which
    otherwise raises NameError); return annotation fixed (returns nothing).
    """
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )

    # Register commands (same order as before).
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 89 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# Physical constants used by the Casimir equation below. The obfuscated code
# assigned both to the same name `_UpperCamelCase` while the function body
# reads REDUCED_PLANCK_CONSTANT / SPEED_OF_LIGHT — names restored.
REDUCED_PLANCK_CONSTANT = 1.054_571_817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1


def a__(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir force equation for whichever argument is zero.

    F = (ħ c π² A) / (240 d⁴). Exactly one of `force`, `area`, `distance`
    must be 0; that quantity is computed from the other two and returned in
    a single-entry dict keyed by its name.

    Bug fixed: the original computed each result into a throwaway local and
    then returned the caller's original zero value (e.g. ``{"force": 0}``).
    Duplicate parameter names and the bogus int/str annotations also fixed.

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 206 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
    """Fast CPU tests for ``OnnxStableDiffusionPipeline``.

    Each test loads a tiny hub checkpoint, runs two inference steps under a
    specific scheduler, and compares a fixed 3x3 corner slice of the output
    image against recorded reference values. Two further tests check that
    pre-computed prompt / negative-prompt embeddings reproduce the plain
    text-prompt path.

    NOTE(review): this class was mechanically obfuscated — every method is
    named ``UpperCamelCase`` (later defs shadow earlier ones) and locals are
    assigned to ``_lowercase`` while subsequent lines still read the original
    names (``generator``, ``inputs``, ``pipe``, ``image_slice``, ...), and
    the class attribute is read back as ``self.hub_checkpoint``. The code is
    kept byte-identical here; restore the original identifiers (from
    upstream diffusers) before running.
    """

    # Tiny checkpoint id so each pipeline run stays fast on CPU.
    lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    def UpperCamelCase ( self, lowerCamelCase=0) -> str:
        """Build deterministic call kwargs (seeded RNG, 2 steps, numpy output)."""
        _lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
        _lowercase : Union[str, Any] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def UpperCamelCase ( self) -> List[str]:
        """Default (checkpoint) scheduler: output slice matches reference."""
        _lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[str] = self.get_dummy_inputs()
        _lowercase : Tuple = pipe(**lowerCamelCase).images
        _lowercase : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> List[Any]:
        """PNDM scheduler (skip_prk_steps): output slice matches reference."""
        _lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Dict = self.get_dummy_inputs()
        _lowercase : Optional[int] = pipe(**lowerCamelCase).images
        _lowercase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> str:
        """LMS discrete scheduler: output slice matches reference."""
        _lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Dict = self.get_dummy_inputs()
        _lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
        _lowercase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> Optional[int]:
        """Euler discrete scheduler: output slice matches reference."""
        _lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = self.get_dummy_inputs()
        _lowercase : Any = pipe(**lowerCamelCase).images
        _lowercase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> Dict:
        """Euler-ancestral discrete scheduler: output slice matches reference."""
        _lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = self.get_dummy_inputs()
        _lowercase : Optional[int] = pipe(**lowerCamelCase).images
        _lowercase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> List[Any]:
        """DPM-Solver multistep scheduler: output slice matches reference."""
        _lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        _lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Union[str, Any] = self.get_dummy_inputs()
        _lowercase : Any = pipe(**lowerCamelCase).images
        _lowercase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        _lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def UpperCamelCase ( self) -> int:
        """Pre-computed prompt embeddings reproduce the text-prompt output."""
        _lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : str = self.get_dummy_inputs()
        _lowercase : Any = 3 * [inputs['prompt']]
        # forward
        _lowercase : int = pipe(**lowerCamelCase)
        _lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
        _lowercase : int = self.get_dummy_inputs()
        _lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
        _lowercase : Union[str, Any] = pipe.tokenizer(
            lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
        _lowercase : Tuple = text_inputs['input_ids']
        _lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
        # NOTE(review): obfuscated — this should assign prompt_embeds into the
        # call inputs before the second forward pass; verify against upstream.
        _lowercase : List[Any] = prompt_embeds
        # forward
        _lowercase : Union[str, Any] = pipe(**lowerCamelCase)
        _lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4

    def UpperCamelCase ( self) -> Optional[int]:
        """Pre-computed negative-prompt embeddings reproduce the text path."""
        _lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Optional[Any] = self.get_dummy_inputs()
        _lowercase : Any = 3 * ['this is a negative prompt']
        _lowercase : str = negative_prompt
        _lowercase : Optional[int] = 3 * [inputs['prompt']]
        # forward
        _lowercase : int = pipe(**lowerCamelCase)
        _lowercase : str = output.images[0, -3:, -3:, -1]
        _lowercase : Union[str, Any] = self.get_dummy_inputs()
        _lowercase : str = 3 * [inputs.pop('prompt')]
        _lowercase : Optional[int] = []
        for p in [prompt, negative_prompt]:
            _lowercase : Tuple = pipe.tokenizer(
                lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
            _lowercase : Dict = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
        # NOTE(review): obfuscated unpack — both targets share a name; upstream
        # unpacks into prompt_embeds / negative_prompt_embeds.
        _lowercase , _lowercase : str = embeds
        # forward
        _lowercase : Dict = pipe(**lowerCamelCase)
        _lowercase : Tuple = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
    """Nightly GPU integration tests for ``OnnxStableDiffusionPipeline``.

    Runs the full-size SD v1.4/v1.5 ONNX checkpoints on CUDA and checks
    output slices, intermediate-latent callbacks, and loading/saving with a
    disabled safety checker.

    NOTE(review): mechanically obfuscated like the class above — locals are
    assigned to ``_lowercase`` while later lines read the original names
    (``sd_pipe``, ``output``, ``image``, ...). Code kept byte-identical;
    restore identifiers from upstream diffusers before running.
    """

    @property
    def UpperCamelCase ( self) -> Union[str, Any]:
        """onnxruntime CUDA provider tuple with a 15 GB arena limit."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCamelCase ( self) -> str:
        """onnxruntime session options (mem-pattern optimization disabled)."""
        _lowercase : int = ort.SessionOptions()
        _lowercase : str = False
        return options

    def UpperCamelCase ( self) -> Optional[Any]:
        """SD v1.4, default scheduler, 10 steps: slice matches reference."""
        _lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        _lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        _lowercase : Optional[Any] = output.images
        _lowercase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase ( self) -> Optional[int]:
        """SD v1.5 with DDIM scheduler: slice matches reference."""
        _lowercase : str = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        _lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : List[Any] = 'open neural network exchange'
        _lowercase : List[Any] = np.random.RandomState(0)
        _lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
        _lowercase : Optional[Any] = output.images
        _lowercase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase ( self) -> List[str]:
        """SD v1.5 with LMS discrete scheduler: slice matches reference."""
        _lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        _lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Tuple = 'open neural network exchange'
        _lowercase : str = np.random.RandomState(0)
        _lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
        _lowercase : Optional[Any] = output.images
        _lowercase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3

    def UpperCamelCase ( self) -> int:
        """Intermediate-latent callback fires each step with expected values."""

        _lowercase : List[Any] = 0

        def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
            # Called by the pipeline once per step with (step, timestep, latents).
            _lowercase : List[str] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                _lowercase : Any = latents[0, -3:, -3:, -1]
                _lowercase : Tuple = np.array(
                    [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                _lowercase : List[Any] = latents[0, -3:, -3:, -1]
                _lowercase : str = np.array(
                    [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3

        _lowercase : Any = False
        _lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=lowerCamelCase)
        _lowercase : Any = 'Andromeda galaxy in a bottle'
        _lowercase : str = np.random.RandomState(0)
        pipe(
            prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def UpperCamelCase ( self) -> Tuple:
        """Pipeline works and round-trips save/load with safety_checker=None."""
        _lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
        assert isinstance(lowerCamelCase, lowerCamelCase)
        assert pipe.safety_checker is None
        _lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase)
            _lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
| 89 | 0 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def __snake_case():
    """Split raw DPR training data into an evaluation-set file (one question
    per line) and a gold-data file (tab-joined positive-context titles)."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            # Gold data = titles of the positive contexts for this question.
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    __snake_case()
| 580 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module is lazy.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so heavy submodules are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 | 0 |
def __lowerCAmelCase ( _UpperCamelCase ) -> set:
    """Approximate a minimum vertex cover of an adjacency-list graph.

    Matching-based 2-approximation: repeatedly take an arbitrary remaining
    edge, add both endpoints to the cover, and drop every edge touching
    either endpoint.
    """
    chosen_vertices = set()
    # Build the edge set inline: the identically-named edge-listing helper
    # defined below shadows this function, so it cannot be called by name.
    edges = set()
    for from_node, to_nodes in _UpperCamelCase.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def __lowerCAmelCase ( _UpperCamelCase ) -> set:
    """Return the set of directed edges (from_node, to_node) of an
    adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in _UpperCamelCase.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 306 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# Hygiene check over the repository's file paths: reject uppercase letters,
# spaces, hyphens, and files living outside any directory.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(F"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(F"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Non-zero exit code signals CI failure; value is the number of bad files.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config URLs for pretrained Speech2Text checkpoints.
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class snake_case__(_a ):
    """Configuration for Speech2Text models.

    Stores the encoder/decoder transformer hyperparameters plus the
    convolutional subsampler settings used on the input features.
    """

    model_type = """speech_to_text"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6_000,
        max_target_positions=1_024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1_024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # One kernel size must be supplied per convolutional layer.
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, """
                f"""`config.num_conv_layers = {self.num_conv_layers}`.""")

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 496 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope='session')
def dataset():
    """A small in-memory Dataset with tokens/labels/answers/id columns."""
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }),
            'id': datasets.Value('int64'),
        })
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        }, features=features, )
    return dataset


@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    """Path of the `dataset` fixture materialized as an Arrow cache file."""
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."


@pytest.fixture(scope='session')
def text_file(tmp_path_factory):
    """Plain-text file containing FILE_CONTENT."""
    filename = tmp_path_factory.mktemp('data') / 'file.txt'
    data = FILE_CONTENT
    with open(filename, 'w') as f:
        f.write(data)
    return filename


@pytest.fixture(scope='session')
def bz2_file(tmp_path_factory):
    """FILE_CONTENT compressed with bz2."""
    import bz2

    path = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
    data = bytes(FILE_CONTENT, 'utf-8')
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def gz_file(tmp_path_factory):
    """FILE_CONTENT compressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'file.txt.gz')
    data = bytes(FILE_CONTENT, 'utf-8')
    with gzip.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def lz4_file(tmp_path_factory):
    """FILE_CONTENT compressed with lz4 (None when lz4 is unavailable)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp('data') / 'file.txt.lz4'
        data = bytes(FILE_CONTENT, 'utf-8')
        with lz4.frame.open(path, 'wb') as f:
            f.write(data)
        return path


@pytest.fixture(scope='session')
def seven_zip_file(tmp_path_factory, text_file):
    """text_file archived as 7z (None when py7zr is unavailable)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp('data') / 'file.txt.7z'
        with py7zr.SevenZipFile(path, 'w') as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope='session')
def tar_file(tmp_path_factory, text_file):
    """text_file archived as an uncompressed tar."""
    import tarfile

    path = tmp_path_factory.mktemp('data') / 'file.txt.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope='session')
def xz_file(tmp_path_factory):
    """FILE_CONTENT compressed with xz/lzma."""
    import lzma

    path = tmp_path_factory.mktemp('data') / 'file.txt.xz'
    data = bytes(FILE_CONTENT, 'utf-8')
    with lzma.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def zip_file(tmp_path_factory, text_file):
    """text_file stored in a zip archive."""
    import zipfile

    path = tmp_path_factory.mktemp('data') / 'file.txt.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope='session')
def zstd_file(tmp_path_factory):
    """FILE_CONTENT compressed with zstandard (None when unavailable)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp('data') / 'file.txt.zst'
        data = bytes(FILE_CONTENT, 'utf-8')
        with zstd.open(path, 'wb') as f:
            f.write(data)
        return path


@pytest.fixture(scope='session')
def xml_file(tmp_path_factory):
    """A small TMX (XML) translation-memory file."""
    filename = tmp_path_factory.mktemp('data') / 'file.xml'
    data = textwrap.dedent(
        '\\n    <?xml version="1.0" encoding="UTF-8" ?>\n    <tmx version="1.4">\n      <header segtype="sentence" srclang="ca" />\n      <body>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n        </tu>\n        <tu>\n          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n          <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n        </tu>\n      </body>\n    </tmx>')
    with open(filename, 'w') as f:
        f.write(data)
    return filename
# Canonical row/column test data reused by the file-format fixtures below.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
# Rows with keys in 3-1-2 order (column-order robustness tests).
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
# Rows whose col_1 values are non-numeric strings.
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope='session')
def dataset_dict():
    """The dict-of-lists form of the canonical test data."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session')
def arrow_path(tmp_path_factory):
    """DATA_DICT_OF_LISTS materialized as an Arrow file."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope='session')
def sqlite_path(tmp_path_factory):
    """DATA written to a single-table SQLite database."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope='session')
def csv_path(tmp_path_factory):
    """DATA written as a CSV file."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope='session')
def csva_path(tmp_path_factory):
    """DATA written as a second, distinct CSV file."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope='session')
def bz2_csv_path(csv_path, tmp_path_factory):
    """csv_path recompressed with bz2."""
    import bz2

    path = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
    with open(csv_path, 'rb') as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def zip_csv_path(csv_path, csva_path, tmp_path_factory):
    """Both CSV files zipped at the archive root."""
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csva_path, arcname=os.path.basename(csva_path))
    return path


@pytest.fixture(scope='session')
def zip_uppercase_csv_path(csv_path, csva_path, tmp_path_factory):
    """Both CSV files zipped with uppercase .CSV extensions."""
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace('.csv', '.CSV')))
        f.write(csva_path, arcname=os.path.basename(csva_path.replace('.csv', '.CSV')))
    return path


@pytest.fixture(scope='session')
def zip_csv_with_dir_path(csv_path, csva_path, tmp_path_factory):
    """Both CSV files zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.join('main_dir', os.path.basename(csv_path)))
        f.write(csva_path, arcname=os.path.join('main_dir', os.path.basename(csva_path)))
    return path


@pytest.fixture(scope='session')
def parquet_path(tmp_path_factory):
    """DATA written as a Parquet file with an explicit schema."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        })
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='session')
def json_path(tmp_path_factory):
    """DATA wrapped in {"data": ...} and written as JSON."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    """DATA_DICT_OF_LISTS wrapped in {"data": ...} and written as JSON."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    """DATA written as JSON Lines."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl2_path(tmp_path_factory):
    """DATA written as a second, distinct JSON Lines file."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    """DATA_312 (shuffled key order) written as JSON Lines."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path, 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    """DATA_STR (string col_1 values) written as JSON Lines."""
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path, 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def text_gz_path(tmp_path_factory, text_path):
    """text_path recompressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    """jsonl_path recompressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Both JSONL files zipped at the archive root."""
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """A zip containing another jsonl zip under nested/."""
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Both JSONL files zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Both JSONL files archived as tar."""
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """A tar containing another jsonl tar under nested/."""
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def text_path(tmp_path_factory):
    """Four numbered lines written as a text file."""
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def texta_path(tmp_path_factory):
    """A second, distinct numbered-lines text file."""
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def abc_path(tmp_path_factory):
    """Numbered lines written with an unsupported .abc extension."""
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def zip_text_path(text_path, texta_path, tmp_path_factory):
    """Both text files zipped at the archive root."""
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(texta_path, arcname=os.path.basename(texta_path))
    return path


@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, texta_path, tmp_path_factory):
    """Both text files zipped under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(texta_path, arcname=os.path.join('main_dir', os.path.basename(texta_path)))
    return path


@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, texta_path, tmp_path_factory):
    """Both text files zipped under unsupported .ext names."""
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(texta_path, arcname=os.path.basename('unsupported_2.ext'))
    return path


@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    """A text file containing a U+2029 paragraph-separator line break."""
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path


@pytest.fixture(scope='session')
def image_file():
    """Path of the checked-in RGB test image."""
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')


@pytest.fixture(scope='session')
def audio_file():
    """Path of the checked-in 44.1 kHz test WAV file."""
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')


@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    """The test image zipped twice under two different names."""
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path


@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    """A data dir with visible and hidden files/subdirectories."""
    data_dir = tmp_path_factory.mktemp('data_dir')

    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)

    return data_dir
| 89 | 0 |
def A__ ( lowercase: int ) -> int:
    """Return the `lowercase`-th Fibonacci number (F(2)=1, F(3)=2, ...).

    Returns 0 for non-int input or for index 1, mirroring the original's
    guard clause.
    """
    if lowercase == 1 or not isinstance(lowercase, int):
        return 0
    elif lowercase == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, lowercase + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[lowercase]
def A__ ( lowercase: int ) -> int:
    """Return the index of the first Fibonacci number with `lowercase` digits.

    Fibonacci values are computed iteratively in-place (a, b track
    F(index-1), F(index)) instead of recomputing the whole sequence per
    step, which also avoids depending on the identically-named sibling.
    """
    digits = 0
    index = 2
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    while digits < lowercase:
        index += 1
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        digits = len(str(fib_curr))
    return index
def A__ ( lowercase: int = 1_000 ) -> int:
    """Project Euler 25: index of the first Fibonacci number with
    `lowercase` digits (default 1000).

    The digit-index search is inlined so this entry point does not depend
    on the identically-named (and therefore shadowed) helpers above.
    """
    digits = 0
    index = 2
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    while digits < lowercase:
        index += 1
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        digits = len(str(fib_curr))
    return index


if __name__ == "__main__":
    print(A__(int(str(input()).strip())))
| 305 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module is lazy.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so heavy submodules are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Restored name matches the call in the __main__ guard below.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 323 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    """Build tiny XLM configs/inputs and run shape checks for every XLM head.

    NOTE(review): `ids_tensor`, `random_attention_mask`, `XLMConfig` and
    `torch_device` are not imported in the visible portion of this file --
    presumably imported earlier; confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        # `parent` is the unittest.TestCase that owns the assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create random input tensors plus the matching label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Return a tiny XLMConfig matching the tester hyper-parameters."""
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the base model output shape with and without optional inputs."""
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the LM head: scalar loss, logits over the vocabulary."""
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the simple (SQuAD-style) QA head output shapes."""
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the beam-search QA head, with and without label inputs."""
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        # When labels are given the model returns only the total loss.
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the sequence-classification head output shapes."""
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the token-classification head output shapes."""
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the multiple-choice head: inputs are tiled over the choice dim."""
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the mixin base classes and `require_torch`/`slow`/`torch_device`/
    # `ConfigTester` are not imported in the visible portion of this file -- they follow
    # the standard transformers test layout; confirm the imports earlier in the file.

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Return True for pipeline test cases that are known to fail."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common input preparation with dummy QA label tensors."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Verify the per-step attention tuples produced during generation."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """Verify the per-step hidden-state tuples produced during generation."""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        """Greedy generation from a pretrained checkpoint matches a fixed sequence.

        NOTE(review): `torch_device` is assumed to be imported earlier in the file.
        """
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89 | 0 |
import math
import string

import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid, recursive).

    The function previously recursed with an undefined name; it must pass ``a``
    as the second argument. It also carries the name the rest of this file
    calls (`greatest_common_divisor`).
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class SCREAMING_SNAKE_CASE:
    """Hill cipher over the 36-symbol alphabet A-Z0-9 (arithmetic mod 36).

    The key is an N x N integer matrix; plaintext is processed in blocks of N
    symbols. Attribute/method names are restored to the names the bodies and
    the CLI driver actually reference (`key_string`, `encrypt`, `decrypt`, ...).
    """

    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    # round float matrix entries back to ints after the inverse-key computation
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        """Store the key reduced mod 36 and validate its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol to its 0-35 index."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a 0-35 value back to its symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (key invertible)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if math.gcd(det, req_l) != 1:
            msg = (
                f'''determinant modular {req_l} of encryption key({det}) '''
                f'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case, drop foreign symbols, pad with the last char to a block multiple."""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt ``text`` block by block: cipher_block = key @ block (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ''

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = ''.join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        """Return the modular inverse of the key matrix (entries mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # modular multiplicative inverse of the determinant, found by search
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt ``text`` block by block with the inverse key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''.join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main():
    """Interactive CLI: read a key matrix, then encrypt or decrypt user text.

    Named ``main`` because the ``__main__`` guard below calls ``main()``;
    undefined placeholder names inside the body are replaced with the locals
    the surrounding statements actually create (``n``, ``hill_matrix``, ...).
    """
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = SCREAMING_SNAKE_CASE(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
    # Run the module's doctests first, then start the interactive cipher CLI.
    import doctest

    doctest.testmod()
    # NOTE(review): confirm a function named `main` is defined above -- the CLI
    # driver in the visible code carries a different (obfuscated) name.
    main()
| 62 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# Module-level logger: the training code below refers to it as `logger`,
# so it must be bound under that name (it was previously assigned to an
# alias the rest of the script never reads).
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names are restored from the `help` metadata and from how `main()`
    reads them (e.g. ``data_args.pad_to_max_length``); the class is named
    ``DataTrainingArguments`` because ``HfArgumentParser`` is constructed with
    that name below. Obfuscated ``default=_a`` placeholders are replaced with
    ``None``/``False`` -- TODO confirm against the upstream script.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Named __post_init__ so the dataclass machinery actually runs this
        # validation (a plain method name would never be called).
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Field names are restored from the `help` metadata and from how `main()`
    reads them (``model_args.model_name_or_path`` etc.); the class is named
    ``ModelArguments`` because ``HfArgumentParser`` is constructed with that
    name below. ``default=_a`` placeholders become ``None``/``True``/``False``
    -- TODO confirm against the upstream script.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowercase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
elif training_args.fpaa:
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
    """Spawn-entry wrapper that simply forwards to ``main()``.

    NOTE(review): the single argument is presumably the process index injected
    by the ``xla_spawn`` launcher — it is unused here; confirm against the launcher.
    """
    # For xla_spawn (TPUs)
    main()
# Script entry point: run training/evaluation directly from the command line.
if __name__ == "__main__":
    main()
| 89 | 0 |
# Standard Base64 alphabet (RFC 4648): A-Z, a-z, 0-9, "+", "/".
a = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Backward-compatible alias: the encode/decode functions below look the charset
# up under this name, which was previously undefined.
B64_CHARSET = a
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bytes:
    """Base64-encode ``lowerCamelCase__`` (a bytes-like object) per RFC 4648.

    Returns the encoded ASCII bytes, including ``=`` padding.

    Raises:
        TypeError: if the argument is not a bytes-like object.
    """
    data = lowerCamelCase__
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    # Base64 alphabet, kept local so the function is self-contained.
    b64_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The "=" padding that will be appended to the encoded output.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append arbitrary binary digits (0's) so the stream length is a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            b64_charset[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bytes:
    """Decode Base64 ``lowerCamelCase__`` (str or ASCII bytes) back to raw bytes.

    Raises:
        TypeError: if the argument is neither bytes nor str.
        ValueError: if a bytes argument contains non-ASCII data.
        AssertionError: on invalid characters or incorrect padding (kept as
            asserts to preserve the original exception contract).
    """
    encoded_data = lowerCamelCase__
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')
    # Base64 alphabet, kept local so the function is self-contained.
    b64_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    padding = encoded_data.count('=')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in b64_charset for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in b64_charset for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding, then drop the 2 filler bits per padding character.
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_bytes = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_bytes)
| 652 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    """Liouville lambda of ``lowerCamelCase_``: -1 if it has an odd number of
    prime factors (with multiplicity), else 1.

    Raises:
        TypeError: if the input is not an integer.
        ValueError: if the input is < 1.
    """
    number = lowerCamelCase_
    if not isinstance(number, int):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer')
    # prime_factors returns the factorization with multiplicity (project helper).
    return -1 if len(prime_factors(number)) % 2 else 1
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 89 | 0 |
import math
def __snake_case ( initial_intensity: float , angle: float ) -> float:
    """Malus's law: transmitted intensity through a polarizer.

    Args:
        initial_intensity: incident light intensity (must be >= 0).
        angle: polarizer angle in degrees (must be in [0, 360]).

    Returns:
        ``initial_intensity * cos(angle)**2``.

    Raises:
        ValueError: on a negative intensity or an out-of-range angle.
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


# Importable (non-underscore) alias so `from module import *` can reach the function.
malus_law = __snake_case
# Run the doctests for this module (registered under the name "malus_law").
if __name__ == "__main__":
    import doctest
    doctest.testmod(name='''malus_law''')
| 454 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
    """A simple dense matrix backed by a list of lists, supporting +, -, *,
    transpose and the Sherman-Morrison rank-one inverse update.

    Aliased as ``Matrix`` below for readability and importability.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier wide enough for the longest element.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = F'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """True iff `loc` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another) -> Matrix:
        assert isinstance(another, _lowerCamelCase)
        assert self.row == another.row and self.column == another.column
        result = _lowerCamelCase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = _lowerCamelCase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = _lowerCamelCase(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, _lowerCamelCase):  # Matrix multiplication
            assert self.column == another.row
            result = _lowerCamelCase(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F'''Unsupported type given for another ({type(another)})'''
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = _lowerCamelCase(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v) -> Any:
        """Inverse of (A + u v^T) given self = A^(-1); None if not invertible."""
        assert isinstance(u, _lowerCamelCase) and isinstance(v, _lowerCamelCase)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate 1 + v^T A^(-1) u; zero means the update is singular.
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Readable alias used by the self-test below and by annotations.
Matrix = _lowerCamelCase
# Testing
if __name__ == "__main__":

    def testa() -> None:
        """Demo: Sherman-Morrison update of the 3x3 identity with column vectors u, v."""
        # a^(-1): start from the identity matrix.
        ainv = _lowerCamelCase(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''')
        # u, v
        u = _lowerCamelCase(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = _lowerCamelCase(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''')
        print(F'''v is {v}''')
        print(F'''uv^T is {u * v.transpose()}''')
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''')

    def run_doctests() -> None:
        """Run this module's doctests (not invoked by default)."""
        import doctest

        doctest.testmod()

    testa()
| 89 | 0 |
'''simple docstring'''
from collections import defaultdict
def lowercase__( first_str: str, second_str: str ) -> bool:
    """Return True iff the two strings are anagrams of each other.

    Comparison is case-insensitive and ignores all spaces.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive demo: read two strings and report whether they are anagrams.
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    # `check_anagrams` was never defined in this module; call the function above.
    status = lowercase__(input_a, input_b)
    print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 28 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    """Format a duration in seconds as ``h:mm:ss`` (or ``mm:ss`` when < 1 hour)."""
    t = int(lowerCamelCase_)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''


# The progress-bar classes below call this helper by its descriptive name.
format_time = UpperCamelCase_
def UpperCamelCase_( value , total , prefix , label , width=300 ) -> str:
    """Return the HTML for a ``<progress>`` bar showing `value`/`total`, with
    `prefix` text before it and `label` after; `width` is in pixels."""
    # docstyle-ignore
    return F'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''


# The progress-bar classes below call this helper by its descriptive name.
html_progress_bar = UpperCamelCase_
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Any = F'''{elt:.6f}''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase:
    """HTML progress bar for notebooks (``NotebookProgressBar``; aliased below).

    Rendering goes through ``IPython.display`` (imported as ``disp`` at the top
    of the file). When ``parent`` is set, that tracker owns the actual display.
    """

    # Number of first `update` calls during which the bar always redraws.
    warmup = 5
    # Minimal time (in seconds) between two display updates.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300) -> None:
        """`total`: step count; `prefix`: text before the bar; `leave`: keep the
        bar after completion; `parent`: optional enclosing tracker; `width`: px."""
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        """Move the bar to `value`; redraws are throttled via `wait_for`."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and draw immediately.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild `self.label` (value, elapsed, ETA, it/s, comment) and redraw."""
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            self.label = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                F''' {format_time(self.predicted_remaining)}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
        self.display()

    def display(self):
        """Render the bar's HTML, delegating to the parent tracker when present."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the display, but only if this bar owns one."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))


# Descriptive, importable alias used by the classes below.
NotebookProgressBar = _lowerCamelCase
class _lowerCamelCase(_lowerCamelCase):
    """Progress bar with an attached metrics table and optional child bar
    (``NotebookTrainingTracker``; aliased below).

    NOTE(review): the original base was the undefined placeholder ``_a``; the
    body's use of `self.value`/`self.total`/`update` shows it extends the
    progress-bar class defined just above.
    """

    def __init__(self, num_steps, column_names=None) -> None:
        """`num_steps`: total steps for the bar; `column_names`: initial table header."""
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """Render bar + metrics table + child bar as one HTML blob."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append a row of `values` (a dict) to the inner table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Attach and return a child progress bar rendered under this tracker."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Detach the child bar and redraw without it."""
        self.child_bar = None
        self.display()


# Descriptive, importable alias used by the callback below.
NotebookTrainingTracker = _lowerCamelCase
class _lowerCamelCase(TrainerCallback):
    """Trainer callback rendering notebook progress bars and a metrics table
    (``NotebookProgressCallback``).

    NOTE(review): the original base was the undefined placeholder ``_a``;
    restored to the ``TrainerCallback`` imported at the top of the file, which
    is what these ``on_*`` hook signatures belong to.
    """

    def __init__(self) -> None:
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        """Create the tracker; first column is Epoch or Step depending on strategy."""
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        """Advance the tracker by one global step."""
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        """Create/advance the child prediction bar during evaluation."""
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        """Close and drop the prediction bar."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        """Write the training loss row when there is no evaluation strategy."""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step sine we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """Write a metrics row (training + validation losses and other metrics)."""
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(R'\_loss$', '', k)
            # Drop bookkeeping metrics that should not appear as table columns.
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(F'''{metric_key_prefix}_runtime''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', None)
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        """Final forced update, then release the tracker."""
        self.training_tracker.update(
            state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=True)
        self.training_tracker = None
| 89 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
SCREAMING_SNAKE_CASE :str = logging.getLogger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
if os.path.exists(lowerCamelCase_ ):
if os.path.exists(os.path.join(lowerCamelCase_ , "config.json" ) ) and os.path.isfile(
os.path.join(lowerCamelCase_ , "config.json" ) ):
os.remove(os.path.join(lowerCamelCase_ , "config.json" ) )
if os.path.exists(os.path.join(lowerCamelCase_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(lowerCamelCase_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(lowerCamelCase_ , "pytorch_model.bin" ) )
else:
os.makedirs(lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
def UpperCAmelCase ( p , unlogit=False ) -> str:
    """Entropy of `p` along its last dimension: ``-sum(p * log(p))``.

    If `unlogit`, `p` is squared first. Zero probabilities contribute 0 rather
    than NaN. Parameters restored from the duplicate ``a_`` placeholders.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # log(0) produces -inf and 0 * -inf is NaN; zero entries contribute nothing.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


# Callers in this file refer to the function by this name.
entropy = UpperCAmelCase
def UpperCAmelCase ( a_ ) -> Dict:
    """Log a 2D tensor row by row: header of 1-based column indices, then each
    layer's values (5 decimals for floats, plain ints for long tensors)."""
    tensor = a_
    logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data))


# Callers in this file refer to the function by this (mangled but consistent) name.
print_ad_tensor = UpperCAmelCase
def UpperCAmelCase ( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ) -> int:
    """Compute attention entropies and head importance scores over a dataloader.

    Importance is the accumulated absolute gradient of the loss w.r.t. a
    (layers x heads) head mask. Returns (attn_entropy, head_importance,
    total_loss). Parameters restored from the duplicate ``a_`` placeholders.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        # NOTE(review): batches appear to contain a single tensor of token ids —
        # confirm against the dataloader construction.
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): rank heads from most to least important (descending sort) —
    # confirm against the upstream bertology script.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


# Callers in this file refer to the function by this name.
compute_heads_importance = UpperCAmelCase
def UpperCAmelCase ( args , model , eval_dataloader ) -> Dict:
    """Iteratively zero out the least-important heads until the score drops below
    ``original_score * args.masking_threshold``; saves and returns the mask.

    Parameters restored from the duplicate ``a_`` placeholders.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


# Callers in this file refer to the function by this name.
mask_heads = UpperCAmelCase
def UpperCAmelCase ( args , model , eval_dataloader , head_mask ) -> str:
    """Physically prune the heads zeroed in `head_mask`, then compare score and
    speed before/after pruning and save the pruned model.

    Parameters restored from the duplicate ``a_`` placeholders.
    """
    # Score/time with masking only.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # layer -> list of head indices to prune (where the mask is 0).
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses a single head to a bare int; re-wrap it.
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())
    # Score/time after actual pruning (head_mask no longer needed).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


# Callers in this file refer to the function by this name.
prune_heads = UpperCAmelCase
def UpperCAmelCase ( ) -> List[str]:
    """Entry point: parse CLI args, load GPT-2, score head importance, and
    optionally mask/prune attention heads.

    NOTE(review): identifiers look machine-mangled — every assignment lands in
    ``__A`` while later code reads ``parser``, ``args``, ``device``, ``model``
    etc., which are therefore unbound, and most argument values were replaced
    by the undefined name ``lowerCamelCase_`` (``np.intaa`` is presumably
    ``np.int64`` — TODO confirm). The structure matches the upstream GPT-2
    head-pruning script; verify against it before relying on this code.
    """
    __A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=lowerCamelCase_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=lowerCamelCase_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=lowerCamelCase_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don\'t normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don\'t normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=lowerCamelCase_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=lowerCamelCase_ , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=lowerCamelCase_ , help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length" , default=1_2_8 , type=lowerCamelCase_ , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=lowerCamelCase_ , help="Batch size." )
    parser.add_argument("--seed" , type=lowerCamelCase_ , default=4_2 )
    parser.add_argument("--local_rank" , type=lowerCamelCase_ , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=lowerCamelCase_ , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=lowerCamelCase_ , default="" , help="Can be used for distant debugging." )
    __A = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        __A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        __A = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        __A = torch.device("cuda" , args.local_rank )
        __A = 1
        torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    __A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        __A = nn.parallel.DistributedDataParallel(
            lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ )
    elif args.n_gpu > 1:
        __A = nn.DataParallel(lowerCamelCase_ )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ )
    torch.save(lowerCamelCase_ , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , lowerCamelCase_ )
    # Prepare dataset
    __A = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    __A = (torch.from_numpy(lowerCamelCase_ ),)
    __A = TensorDataset(*lowerCamelCase_ )
    __A = RandomSampler(lowerCamelCase_ )
    __A = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        __A = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the entry point above
    # was renamed to `UpperCAmelCase` — so running this module raises NameError.
    main()
| 55 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> "FocalNetConfig":
    """Build a FocalNetConfig for a named checkpoint.

    Size/variant keywords embedded in the name ("tiny", "base", "lrf", "fl3",
    "huge", ...) select the depths, focal levels/windows, embedding dim and
    the label set (ImageNet-22k for large/huge, ImageNet-1k otherwise).

    Args:
        lowerCamelCase_: checkpoint name, e.g. "focalnet-tiny" or
            "focalnet-large-lrf-fl3".

    Returns:
        The populated FocalNetConfig.
    """
    # Fix: the original body read `model_name` (never bound — the parameter is
    # `lowerCamelCase_`) and dropped every assignment target into `_lowercase`,
    # so `depths`, `idalabel`, etc. were unbound. Locals restored per the
    # upstream FocalNet conversion script.
    model_name = lowerCamelCase_
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}  # JSON keys arrive as strings
    labelaid = {v: k for k, v in idalabel.items()}
    # NOTE(review): the mangled kwarg names "idalabel"/"labelaid" correspond to
    # the real FocalNetConfig keywords `id2label`/`label2id` — using those.
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=idalabel,
        label2id=labelaid,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    """Translate an original FocalNet checkpoint key into its HF Transformers name.

    Non-classifier keys are prefixed with "focalnet."; "head" keys map to the
    classifier; everything else is a chain of substring rewrites.
    """
    # Fix: the body tested/replaced `name`, which was never bound (the
    # parameter is `lowerCamelCase_`), and every replace() result was dropped
    # into `_lowercase` instead of being threaded through `name`.
    name = lowerCamelCase_
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'focalnet.' + name
    return name
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
    """Download a FocalNet checkpoint, convert it to HF format and verify it.

    NOTE(review): this definition is not runnable as written — all three
    parameters share the name ``lowerCamelCase_`` (duplicate arguments are a
    SyntaxError), and the body reads names (``model_name``, ``state_dict``,
    ``model``, ``inputs``, ``processor``, ``pytorch_dump_folder_path``,
    ``push_to_hub``, ...) that are never bound because every assignment target
    was mangled to ``_lowercase``. Judging by the argparse caller below, the
    intended parameters are (model_name, pytorch_dump_folder_path,
    push_to_hub=False). Compare with the upstream conversion script.
    """
    # fmt: off
    _lowercase : Dict = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    _lowercase : Dict = model_name_to_url[model_name]
    print('Checkpoint URL: ' , lowerCamelCase_ )
    _lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
    # rename keys
    for key in state_dict.copy().keys():
        _lowercase : Dict = state_dict.pop(lowerCamelCase_ )
        _lowercase : Optional[int] = val
    _lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
    _lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase_ )
    # verify conversion
    _lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    _lowercase : Any = BitImageProcessor(
        do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
    _lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
    _lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
    # Reference torchvision preprocessing used to cross-check the processor.
    _lowercase : str = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    _lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
    _lowercase : Dict = model(**lowerCamelCase_ )
    _lowercase : int = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
    print('First values of logits:' , outputs.logits[0, :3] )
    # Per-checkpoint expected logit slices used as a conversion sanity check.
    if model_name == "focalnet-tiny":
        _lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
    elif model_name == "focalnet-tiny-lrf":
        _lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
    elif model_name == "focalnet-small":
        _lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
    elif model_name == "focalnet-small-lrf":
        _lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
    elif model_name == "focalnet-base":
        _lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
    elif model_name == "focalnet-base-lrf":
        _lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCamelCase_ )
        processor.save_pretrained(lowerCamelCase_ )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to SCREAMING_SNAKE_CASE but the
    # add_argument calls read the unbound name `parser`; the parsed namespace
    # is again bound to SCREAMING_SNAKE_CASE but read as `args`; and
    # `convert_focalnet_checkpoint` is not defined in this file (the converter
    # above is named `UpperCamelCase_`). Running this raises NameError.
    SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
def a__ (__lowercase: int) -> int:
    """Count the integer partitions of ``__lowercase``.

    Classic DP over a table where column k allows parts of bounded size, so
    memo[m][m - 1] yields the partition count p(m) (verified: p(4)=5, p(5)=7).

    Raises:
        IndexError: for __lowercase <= 0 (the table has no usable columns).
    """
    # Fix: the body referenced the unbound names `m` (the parameter is
    # `__lowercase`) and `lowerCamelCase_`, and the table creation/seeding
    # lines had lost their assignment targets. Restored per the standard
    # integer-partition dynamic program.
    m = __lowercase
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1  # exactly one partition in the base column
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys

    # CLI driver: read the target number from argv[1] or interactively, then
    # print its partition count.
    # Fix: the original referenced the undefined names `partition` (the
    # function above is named `a__`) and `n` (the parsed value was bound to
    # `_UpperCamelCase`), so both paths raised NameError.
    if len(sys.argv) == 1:
        try:
            _UpperCamelCase = int(input('Enter a number: ').strip())
            print(a__(_UpperCamelCase))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            _UpperCamelCase = int(sys.argv[1])
            print(a__(_UpperCamelCase))
        except ValueError:
            print('Please pass a number.')
| 206 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger. NOTE(review): it is immediately clobbered by the
# archive-map assignment below (both are bound to SCREAMING_SNAKE_CASE), so
# the class body's `logger.info(...)` reads an unbound name.
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
SCREAMING_SNAKE_CASE : Any = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
    """Configuration for the DETA object-detection model: backbone config plus
    transformer, deformable-attention, Hungarian-matcher and loss-coefficient
    hyperparameters.

    NOTE(review): this file looks machine-mangled — the base class ``_a`` is
    undefined (upstream this derives from ``PretrainedConfig``), and every
    ``__init__`` parameter shares the single name ``lowerCamelCase``
    (duplicate arguments are a SyntaxError). The body reads the real
    hyperparameter names, so the intended keyword signature can be recovered
    from the assignments below; compare with the upstream ``DetaConfig``.
    """
    lowercase_ : Any = """deta"""
    # Maps common config attribute names onto this model's field names.
    lowercase_ : Union[str, Any] = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
        """Initialize the config, defaulting the backbone to ResNet
        (stages 2-4) when no backbone_config is supplied."""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            _lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            # Accept a plain dict and rebuild the proper config class from it.
            if isinstance(lowerCamelCase, lowerCamelCase):
                _lowercase : Dict = backbone_config.pop('model_type')
                _lowercase : int = CONFIG_MAPPING[backbone_model_type]
                _lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
        _lowercase : Union[str, Any] = backbone_config
        _lowercase : Any = num_queries
        _lowercase : Union[str, Any] = max_position_embeddings
        _lowercase : Union[str, Any] = d_model
        _lowercase : Optional[int] = encoder_ffn_dim
        _lowercase : Optional[int] = encoder_layers
        _lowercase : Optional[Any] = encoder_attention_heads
        _lowercase : Optional[Any] = decoder_ffn_dim
        _lowercase : Dict = decoder_layers
        _lowercase : Tuple = decoder_attention_heads
        _lowercase : Union[str, Any] = dropout
        _lowercase : Optional[Any] = attention_dropout
        _lowercase : int = activation_dropout
        _lowercase : Tuple = activation_function
        _lowercase : List[Any] = init_std
        _lowercase : Union[str, Any] = init_xavier_std
        _lowercase : int = encoder_layerdrop
        _lowercase : Optional[int] = auxiliary_loss
        _lowercase : Dict = position_embedding_type
        # deformable attributes
        _lowercase : Any = num_feature_levels
        _lowercase : str = encoder_n_points
        _lowercase : Any = decoder_n_points
        _lowercase : List[str] = two_stage
        _lowercase : Dict = two_stage_num_proposals
        _lowercase : Any = with_box_refine
        _lowercase : List[Any] = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        _lowercase : List[Any] = class_cost
        _lowercase : Optional[int] = bbox_cost
        _lowercase : str = giou_cost
        # Loss coefficients
        _lowercase : Optional[int] = mask_loss_coefficient
        _lowercase : int = dice_loss_coefficient
        _lowercase : List[Any] = bbox_loss_coefficient
        _lowercase : Optional[Any] = giou_loss_coefficient
        _lowercase : str = eos_coefficient
        _lowercase : int = focal_alpha
        super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
    # NOTE(review): both properties below are named `UpperCamelCase`, so the
    # second shadows the first (upstream: num_attention_heads / hidden_size).
    @property
    def UpperCamelCase ( self) -> int:
        """Alias for the number of attention heads."""
        return self.encoder_attention_heads
    @property
    def UpperCamelCase ( self) -> int:
        """Alias for the hidden size."""
        return self.d_model
    def UpperCamelCase ( self) -> Union[str, Any]:
        """Serialize this config to a plain dict, expanding the nested
        backbone config and recording the model type."""
        _lowercase : int = copy.deepcopy(self.__dict__)
        _lowercase : Optional[int] = self.backbone_config.to_dict()
        _lowercase : Optional[Any] = self.__class__.model_type
        return output
| 89 | 0 |
"""Versatile Diffusion pipeline package init.

Exposes the real pipeline classes only when both PyTorch and a recent-enough
`transformers` (>= 4.25.0) are installed; otherwise re-exports dummy
placeholder objects so that imports still succeed and fail lazily with a
helpful message only on use.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
try:
    # Probe the optional dependencies; raise to fall into the dummy branch.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: export placeholder classes that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies available: import the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 580 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
    # NOTE(review): `relu` is not defined here — the function above was
    # renamed to `UpperCamelCase_` — so this demo line raises NameError.
    print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the Conditional DETR model package.
#
# NOTE(review): this block is machine-mangled — each assignment rebinds the
# single name `_lowercase` (clobbering the previous value instead of adding
# entries to an import-structure dict), while `_LazyModule` at the bottom is
# handed the undefined name `_import_structure`. Upstream, one
# `_import_structure` dict is built up and the optional-dependency branches
# append module entries to it.
_lowercase = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
    # Vision extras (PIL etc.) gate the feature extractor / image processor.
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = ["ConditionalDetrFeatureExtractor"]
    _lowercase = ["ConditionalDetrImageProcessor"]
try:
    # PyTorch gates the modeling classes.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real symbols directly.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy loader.
    import sys

    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 306 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to load weights from.
        config_file: JSON config describing the T5 architecture.
        pytorch_dump_path: output directory for the converted model.
    """
    # Fix: the original signature repeated the parameter name
    # `lowerCamelCase_` three times (a SyntaxError) while the body read the
    # unbound names `config`, `model` and `pytorch_dump_path`. The parameter
    # order matches the positional call in the argparse driver below.
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # NOTE(review): the parser/namespace are bound to SCREAMING_SNAKE_CASE but
    # read back via the unbound names `parser` and `args`, and
    # `convert_tf_checkpoint_to_pytorch` is not defined in this file (the
    # converter above is named `UpperCamelCase_`); running this raises
    # NameError.
    SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    SCREAMING_SNAKE_CASE : Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class snake_case__:
    """Helper that builds tiny MBart configs/inputs for the TF model tests.

    NOTE(review): identifiers look machine-mangled — the three class
    attributes all rebind the single name ``lowercase_`` (upstream:
    config_cls / config_updates / hidden_act), every ``__init__`` parameter
    shares the name ``SCREAMING_SNAKE_CASE`` (duplicate arguments are a
    SyntaxError) while the body reads ``parent``, ``batch_size`` etc., and
    every assignment target became ``lowercase__`` so the ``self.*``
    attributes read elsewhere are never actually set. Compare with the
    upstream ``TFMBartModelTester``.
    """
    lowercase_ = MBartConfig
    lowercase_ = {}
    lowercase_ = """gelu"""
    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=13 , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : List[str]=99 , SCREAMING_SNAKE_CASE : str=32 , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : Dict=4 , SCREAMING_SNAKE_CASE : int=37 , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=20 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : int=1 , SCREAMING_SNAKE_CASE : int=0 , ):
        """Store the (tiny) model hyperparameters used by the tests."""
        lowercase__ : str = parent
        lowercase__ : str = batch_size
        lowercase__ : Union[str, Any] = seq_length
        lowercase__ : Dict = is_training
        lowercase__ : str = use_labels
        lowercase__ : List[Any] = vocab_size
        lowercase__ : Dict = hidden_size
        lowercase__ : Tuple = num_hidden_layers
        lowercase__ : int = num_attention_heads
        lowercase__ : int = intermediate_size
        lowercase__ : Tuple = hidden_dropout_prob
        lowercase__ : int = attention_probs_dropout_prob
        lowercase__ : Union[str, Any] = max_position_embeddings
        lowercase__ : Optional[Any] = eos_token_id
        lowercase__ : Optional[int] = pad_token_id
        lowercase__ : Any = bos_token_id
    def snake_case ( self : Dict ):
        """Build a random (config, inputs_dict) pair ending each sequence
        with an explicit EOS token."""
        lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowercase__ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowercase__ : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase__ : Dict = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowercase__ : str = prepare_mbart_inputs_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        return config, inputs_dict
    def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ):
        """Exercise the decoder cache path with a single-example slice.

        NOTE(review): this method appears truncated — upstream it continues
        past the `past_key_values` extraction and asserts cached vs. uncached
        generation agree.
        """
        lowercase__ : str = TFMBartModel(config=SCREAMING_SNAKE_CASE ).get_decoder()
        lowercase__ : Union[str, Any] = inputs_dict['input_ids']
        lowercase__ : str = input_ids[:1, :]
        lowercase__ : Optional[int] = inputs_dict['attention_mask'][:1, :]
        lowercase__ : Tuple = inputs_dict['head_mask']
        lowercase__ : Tuple = 1
        # first forward pass
        lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = outputs.to_tuple()
        lowercase__ : Optional[int] = past_key_values[1]
def __lowerCamelCase ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Fill in any missing mask tensors for an MBart forward pass and bundle
    everything into the keyword dict the model expects.

    Masks default to: attention masks derived from the pad token, and
    all-ones head masks shaped (num_layers, num_heads).
    """
    # Fix: the original signature repeated the parameter name
    # `lowerCamelCase__` eight times (a SyntaxError) while the body read the
    # real names (`config`, `input_ids`, ...); parameter names restored to
    # match the body. `tf.inta` was also a mangled `tf.int8`.
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the forced first decoder token, then mask padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class snake_case__(_a , _a , unittest.TestCase ):
    """Common-suite tests for the TF MBart models.

    NOTE(review): both mixin bases are the undefined name ``_a`` — duplicate
    bases are a TypeError at class creation; upstream these are
    ``TFModelTesterMixin`` and ``PipelineTesterMixin``. The class attributes
    also all rebind the single name ``lowercase_`` (upstream:
    all_model_classes, all_generative_model_classes, pipeline_model_mapping,
    is_encoder_decoder, test_pruning, test_onnx), each clobbering the last.
    """
    lowercase_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    lowercase_ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    lowercase_ = (
        {
            """conversational""": TFMBartForConditionalGeneration,
            """feature-extraction""": TFMBartModel,
            """summarization""": TFMBartForConditionalGeneration,
            """text2text-generation""": TFMBartForConditionalGeneration,
            """translation""": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    lowercase_ = True
    lowercase_ = False
    lowercase_ = False
    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ):
        """Skip every pipeline test except feature extraction (known
        incompatibilities). NOTE(review): duplicate parameter names here are
        a SyntaxError — see class note."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def snake_case ( self : int ):
        """Set up the model tester and config tester."""
        lowercase__ : List[str] = TFMBartModelTester(self )
        lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE )
    def snake_case ( self : Optional[int] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def snake_case ( self : Dict ):
        """Check the decoder's past-key-values path on large inputs."""
        lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case__(unittest.TestCase ):
    """Slow integration test: en->ro translation with facebook/mbart-large-en-ro.

    NOTE(review): the class attributes all rebind the single name
    ``lowercase_`` (upstream: src_text, expected_text, model_name), each
    clobbering the previous, yet the methods read ``self.src_text``,
    ``self.expected_text`` and ``self.model_name``; likewise method bodies
    drop results into ``lowercase__`` and then read the real local names
    (``generated_words``, ``model``). Compare with the upstream test.
    """
    lowercase_ = [
        """ UN Chief Says There Is No Military Solution in Syria""",
    ]
    lowercase_ = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
    ]
    lowercase_ = """facebook/mbart-large-en-ro"""
    @cached_property
    def snake_case ( self : Union[str, Any] ):
        """Tokenizer for the checkpoint (built once, then cached)."""
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def snake_case ( self : Any ):
        """Model for the checkpoint (built once, then cached)."""
        lowercase__ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
        """Translate the source batch and compare to the expected text."""
        lowercase__ : str = self.translate_src_text(**SCREAMING_SNAKE_CASE )
        self.assertListEqual(self.expected_text , SCREAMING_SNAKE_CASE )
    def snake_case ( self : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
        """Tokenize, generate with beam search and decode the batch."""
        lowercase__ : List[Any] = self.tokenizer(self.src_text , **SCREAMING_SNAKE_CASE , return_tensors="tf" )
        lowercase__ : int = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        lowercase__ : List[Any] = self.tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
        return generated_words
    @slow
    def snake_case ( self : str ):
        """End-to-end translation check (slow: downloads the checkpoint)."""
        self._assert_generated_batch_equal_expected()
| 496 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    """Return the Fibonacci number at the given index.

    Uses the zero-indexed convention F(0)=0, F(1)=1, so the result for index
    n is sequence[n] (e.g. index 10 -> 55). Non-int input, or the special
    index 1, returns 0 — matching the original guard.
    """
    # Fix: the body compared and indexed with `n`, which was never bound (the
    # parameter is `lowerCamelCase_`), and the isinstance guard had lost its
    # arguments; restored as `isinstance(n, int)` per the obvious intent.
    n = lowerCamelCase_
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    """Return the index of the first Fibonacci number with at least
    ``lowerCamelCase_`` decimal digits (Project Euler 25 helper).

    NOTE(review): relies on a ``fibonacci`` helper that is not defined under
    that name in this file — the function above was mangled to
    ``UpperCamelCase_`` (which this definition then shadows); restore the
    helper's real name before running.
    """
    # Fix: the body read the unbound names `digits`, `index` and `n` (every
    # assignment target was `_lowercase`), and passed the target digit count
    # instead of the current index to `fibonacci`, which would loop forever.
    n = lowerCamelCase_
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
    """Project Euler 25 entry point: index of the first Fibonacci number with
    ``lowerCamelCase_`` (default 1000) decimal digits.

    NOTE(review): `fibonacci_digits_index` is not defined under that name in
    this file — the helper above was mangled to `UpperCamelCase_` (which this
    very definition then shadows) — so calling this raises NameError.
    """
    return fibonacci_digits_index(lowerCamelCase_ )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined here — the entry point above is
    # named `UpperCamelCase_` — so this line raises NameError when executed.
    print(solution(int(str(input()).strip())))
| 89 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.