import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    # MNIST/IDX files store counts and dimensions as big-endian uint32 values.
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
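# A quick, hedged sanity check for _read32; the bytes below are the big-endian
# encoding of 2051, the IDX magic number for MNIST image files:
#
#   import io
#   _read32(io.BytesIO(b"\x00\x00\x08\x03"))  # -> 2051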
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
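# A hedged sanity check for _dense_to_one_hot; the labels are illustrative:
#
#   >>> _dense_to_one_hot(numpy.array([0, 2, 1]), num_classes=3)
#   array([[1., 0., 0.],
#          [0., 0., 1.],
#          [0., 1., 0.]])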
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container for an MNIST data set (deprecated helper, see read_data_sets)."""

    @deprecated(
        None,
        'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.',
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
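# A minimal, hedged usage sketch for _DataSet; the array shapes are
# illustrative, not taken from a real MNIST download:
#
#   images = numpy.zeros((100, 28, 28, 1), dtype=numpy.uint8)
#   labels = numpy.zeros(100, dtype=numpy.uint8)
#   ds = _DataSet(images, labels, seed=0)
#   batch_images, batch_labels = ds.next_batch(32)  # shapes (32, 784) and (32,)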
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless it is already there."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            f'{len(train_images)}. Received: {validation_size}.'
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
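# A hedged illustration of the renaming; this key is made up to show how the
# PATTERNS substitutions and the encoder-specific rules compose:
#
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   # -> "encoder.layers.0.self_attn.q_proj.weight"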
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/rename a ParlAI Blenderbot checkpoint's weights into the HF structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending sorted collection using interpolation search."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over `sorted_collection[left:right + 1]`."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
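# Hedged usage sketch for the recursive form; the initial call must cover the
# full index range of the collection:
#
#   interpolation_search_by_recursion([1, 3, 5, 7, 9], 7, 0, 4)  # -> 3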
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    # Define the collection unconditionally; the original debug flag left it
    # undefined on the default path, which raised a NameError below.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Improved pseudo numerical methods for diffusion models (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        self.timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = self.timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth style) coefficients, as in F-PNDM
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
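# A minimal, hedged usage sketch for IPNDMScheduler. The zero `model_output`
# is a placeholder for a real denoising network's prediction, and the tensor
# shape is illustrative only:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # stand-in for a model call
#       sample = scheduler.step(model_output, t, sample).prev_sample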
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Determine whether a two-dimensional polygon can exist with the given side
    lengths: the longest side must be strictly shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
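# Hedged examples; the side lengths are chosen only for illustration:
#
#   check_polygon([6, 10, 5])      # True: 10 < 6 + 5
#   check_polygon([3, 7, 13, 2])   # False: 13 >= 3 + 7 + 2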
if __name__ == "__main__":
import doctest
doctest.testmod()
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Test `FeaturesManager.determine_framework`."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return the primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark all multiples of p starting from p*p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
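# A quick sanity check for the sieve (illustrative):
#
#   >>> prime_sieve_eratosthenes(10)
#   [2, 3, 5, 7]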
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # This __init__ exists for legacy code: it maps the deprecated `no_*`
        # arguments onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
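# A hedged example that follows directly from the conversion table above:
#
#   >>> convert_volume(4.0, "cubicmeter", "litre")
#   4000.0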
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load the dataset metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 568 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 | 0 |
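A worked example for the minimax routine above, on a perfect binary tree of height 2 with leaf scores [3, 5, 2, 9]: the minimizer reduces the pairs to min(3, 5) = 3 and min(2, 9) = 2, and the maximizer then picks max(3, 2) = 3.

import math

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # 2.0 for four leaves
print(minimax(0, 0, True, scores, height))  # 3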
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
| 305 |
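The shift-and-add loop above is Russian peasant multiplication: each set bit of b contributes the correspondingly doubled a. Worked through for 13 * 9 (9 = 0b1001), the contributions are 13 * 1 + 13 * 8 = 117, and the modular variant reduces every partial sum.

print(binary_multiply(13, 9))           # 117
print(binary_mod_multiply(13, 9, 100))  # 17, i.e. 117 % 100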
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 63 | 0 |
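A short usage sketch for the config class above: the attribute_map lets generic code read `hidden_size` even though this config stores the value under the GPT-1 name `n_embd` (attribute resolution through `attribute_map` is provided by PretrainedConfig).

config = OpenAIGPTConfig(n_embd=512)
print(config.hidden_size)  # 512, resolved through attribute_map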
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 637 |
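An independent cross-check of solution() using only the standard library, where Monday is weekday() == 0 and Sunday is 6; it confirms the hand-rolled calendar walk above.

import datetime

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6
)
print(sundays)  # 171, matching solution()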
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:16, :16] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 63 | 0 |
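A minimal end-to-end sketch of the pipeline exercised by the slow test above, assuming a CUDA GPU, the `diffusers` package, and access to the kandinsky-community checkpoints; mask polarity (which region gets repainted) follows the installed diffusers version, so treat the zeroed band below as illustrative only.

import numpy as np
import torch
from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22InpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # same band as the integration test above
image_emb, negative_emb = prior("a hat").to_tuple()
result = decoder(
    image=image,
    mask_image=mask,
    image_embeds=image_emb,
    negative_image_embeds=negative_emb,
    height=768,
    width=768,
).images[0]
result.save("cat_with_hat.png")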
"""simple docstring"""
from collections import deque
def A_ ( __lowercase ):
UpperCamelCase_ : List[Any] =len(__lowerCamelCase )
UpperCamelCase_ : List[Any] =deque()
UpperCamelCase_ : int =[False for _ in range(__lowerCamelCase )]
UpperCamelCase_ : Dict =[-1 for _ in range(__lowerCamelCase )]
UpperCamelCase_ : Optional[Any] =index_of[:]
def strong_connect(__lowercase , __lowercase , __lowercase ):
UpperCamelCase_ : Dict =index # the number when this node is seen
UpperCamelCase_ : List[str] =index # lowest rank node reachable from here
index += 1
stack.append(__lowerCamelCase )
UpperCamelCase_ : List[str] =True
for w in g[v]:
if index_of[w] == -1:
UpperCamelCase_ : int =strong_connect(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
UpperCamelCase_ : List[Any] =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCamelCase_ : str =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
UpperCamelCase_ : Any =[]
UpperCamelCase_ : Tuple =stack.pop()
UpperCamelCase_ : List[str] =False
component.append(__lowerCamelCase )
while w != v:
UpperCamelCase_ : Dict =stack.pop()
UpperCamelCase_ : List[str] =False
component.append(__lowerCamelCase )
components.append(__lowerCamelCase )
return index
UpperCamelCase_ : Union[str, Any] =[]
for v in range(__lowerCamelCase ):
if index_of[v] == -1:
strong_connect(__lowerCamelCase , 0 , __lowerCamelCase )
return components
def A_ ( __lowercase , __lowercase ):
UpperCamelCase_ : Tuple =[[] for _ in range(__lowerCamelCase )]
for u, v in edges:
g[u].append(__lowerCamelCase )
return g
if __name__ == "__main__":
# Test
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__SCREAMING_SNAKE_CASE = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__SCREAMING_SNAKE_CASE = [(u, v) for u, v in zip(source, target)]
__SCREAMING_SNAKE_CASE = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 357 |
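A second, hand-checkable example for tarjan(): vertices 0 and 1 form a cycle and vertex 2 hangs off it, so the components come back in reverse topological order of the condensation.

print(tarjan(create_graph(3, [(0, 1), (1, 0), (1, 2)])))  # [[2], [1, 0]]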
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file_content = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging Face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 63 | 0 |
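A usage sketch for the caching helpers above, assuming network access: the first call downloads into TRANSFORMERS_CACHE under a hash-derived filename, and later calls return the cached copy without re-downloading.

local = cached_path("https://raw.githubusercontent.com/huggingface/transformers/main/README.md")
print(local)  # path of the cached, hash-named file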
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 540 |
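A hedged usage sketch: once this module builds, the generated ModelProto class can parse a real SentencePiece model file ("spiece.model" below is a placeholder path, not a file shipped with this code).

proto = ModelProto()
with open("spiece.model", "rb") as f:
    proto.ParseFromString(f.read())
print(proto.trainer_spec.vocab_size, len(proto.pieces))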
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 0 |
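A minimal forward pass mirroring the slow test above, assuming network access to download the roberta-base weights (converted from PyTorch via from_pt=True).

import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)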
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 664 |
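The shim above only warns and then delegates to YolosImageProcessor. This sketch, assuming `transformers` is installed, shows the FutureWarning firing on instantiation:

import warnings

from transformers.models.yolos.feature_extraction_yolos import YolosFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = YolosFeatureExtractor()
print(caught[0].category.__name__)  # FutureWarning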
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 | 0 |
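A usage sketch for the get_scheduler factory above, assuming only `torch` is installed: warm up linearly for 10 steps, then decay cosine-style to zero over the remaining 90.

import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # [0.0] after the full cosine decay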
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
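
# --- Illustrative example (added; not part of the original module) ---
# A minimal sketch of driving a toy optimizer through warmup plus cosine decay
# with `get_scheduler` above; the model, step counts, and learning rate are
# arbitrary demo values.
def _demo_scheduler() -> None:
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        optimizer.step()  # normally preceded by forward/backward passes
        scheduler.step()
    print(scheduler.get_last_lr())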
| 146 |
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
    print("\nEnter 0 to exit...")
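
# --- Illustrative example (added; not part of the original snippet) ---
# A self-contained check of the half-integer gamma implementation above,
# using Γ(n) = (n - 1)! for positive integers and Γ(3/2) = (1/2)·√π.
def _demo_gamma() -> None:
    assert gamma(5) == 24.0  # Γ(5) = 4! = 24
    assert abs(gamma(1.5) - 0.5 * sqrt(pi)) < 1e-12
    print("gamma demo passed")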
| 63 | 0 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
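
# --- Illustrative example (added; not part of the original snippet) ---
# Round-trip check for the cipher above; "RIJVS" is the classic Vigenère
# result for the plaintext HELLO under the key KEY.
def _demo_vigenere() -> None:
    assert encrypt_message("KEY", "HELLO") == "RIJVS"
    assert decrypt_message("KEY", "RIJVS") == "HELLO"
    print("vigenere demo passed")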
| 41 |
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
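
# --- Illustrative example (added; not part of the original snippet) ---
# Quick structural check of the traversals above on the 5-node sample tree
# built by make_tree() (1 at the root, 2/3 below it, 4/5 under 2).
def _demo_traversals() -> None:
    root = make_tree()
    assert preorder(root) == [1, 2, 4, 5, 3]
    assert inorder(root) == [4, 2, 5, 1, 3]
    assert list(level_order(root)) == [1, 2, 3, 4, 5]
    assert zigzag(root) == [[1], [3, 2], [4, 5]]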
| 63 | 0 |
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
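
# --- Illustrative example (added; not part of the original snippet) ---
# Solve for the missing quantity by passing it as 0; the plate area and gap
# below are arbitrary illustrative values, not values from the source.
def _demo_casimir() -> None:
    out = casimir_force(force=0, area=0.04, distance=1e-6)
    print(f"Casimir force for a 0.04 m^2 plate at a 1 micrometre gap: {out['force']:.3e} N")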
| 283 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 0 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
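
# --- Illustrative example (added; not part of the original snippet) ---
# The A1Z26 cipher maps a..z to 1..26; encode/decode should round-trip.
def _demo_a1z26() -> None:
    assert encode("hello") == [8, 5, 12, 12, 15]
    assert decode([8, 5, 12, 12, 15]) == "hello"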
| 600 |
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Flip deprecated `no_*` arguments into their positive counterparts
        # and warn about the deprecation.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
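
# --- Illustrative example (added; not part of the original module) ---
# A hedged sketch of constructing the arguments object. `models`,
# `batch_sizes`, and `sequence_lengths` are assumed to be fields inherited
# from `BenchmarkArguments`; the values are illustrative only.
def _demo_args() -> "PyTorchBenchmarkArguments":
    return PyTorchBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    )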
| 63 | 0 |
from __future__ import annotations

from random import random


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
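
# --- Illustrative example (added; not part of the original snippet) ---
# Build a small treap with `insert` and print it with `inorder` (defined
# below; the forward reference is resolved at call time, so this is safe).
def _demo_treap() -> None:
    root = None
    for value in (5, 3, 9, 1):
        root = insert(root, value)
    inorder(root)  # prints the keys in sorted order: 1,3,5,9,
    print()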
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
 | 6 |
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
# fmt: off
snake_case_ : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case_ : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case_ : Dict = {"""unk_token""": """<unk>"""}
snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case_ : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ : int = self.get_input_output_texts(__lowercase )
snake_case_ : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
snake_case_ : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def _lowerCAmelCase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def _lowerCAmelCase ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def _lowerCAmelCase ( self ) -> Tuple:
pass # TODO add if relevant
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = self.get_tokenizer()
# Testing tokenization
snake_case_ : int = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case_ : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case_ : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
snake_case_ : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
snake_case_ : List[Any] = tokens + [tokenizer.unk_token]
snake_case_ : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case_ : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = self.get_tokenizer()
# Testing tokenization
snake_case_ : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case_ : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case_ : Tuple = tokenizer.encode(__lowercase )
snake_case_ : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
snake_case_ : List[Any] = """こんにちは、世界。"""
snake_case_ : Optional[int] = """こんばんは、㔺界。😀"""
snake_case_ : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
snake_case_ : List[str] = tokenizer.encode(prefix_text + input_text )
snake_case_ : List[Any] = tokenizer.encode("" , prefix_text=prefix_text + input_text )
snake_case_ : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
snake_case_ : Optional[int] = tokenizer.decode(__lowercase )
snake_case_ : Any = tokenizer.decode(__lowercase )
snake_case_ : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def _lowerCAmelCase ( self ) -> str:
snake_case_ : int = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
snake_case_ : int = """こんにちは、世界。"""
snake_case_ : List[Any] = """こんばんは、㔺界。😀"""
snake_case_ : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
snake_case_ : int = len(tokenizer.encode(__lowercase ) ) - 2
snake_case_ : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
snake_case_ : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
snake_case_ : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case_ : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
snake_case_ : Optional[Any] = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
snake_case_ : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Tuple = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
snake_case_ : Optional[int] = tokenizer.encode("あンいワ" )
snake_case_ : Tuple = tokenizer.encode("" , prefix_text="あンいワ" )
snake_case_ : Optional[int] = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : Any = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
snake_case_ : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case_ : int = tokenizer(__lowercase , padding=__lowercase )
snake_case_ : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
snake_case_ : Optional[int] = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
snake_case_ : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case_ : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _lowerCAmelCase ( self ) -> int:
# tokenizer has no padding token
pass
| 568 |
from __future__ import annotations

seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 63 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , 'image_mean' ) )
self.assertTrue(hasattr(__lowercase , 'image_std' ) )
self.assertTrue(hasattr(__lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(__lowercase , 'do_resize' ) )
self.assertTrue(hasattr(__lowercase , 'do_rescale' ) )
self.assertTrue(hasattr(__lowercase , 'do_pad' ) )
self.assertTrue(hasattr(__lowercase , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
A : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , __lowercase )
A : int =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __lowercase )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
A : Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
A : Dict =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A : Union[str, Any] =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Union[str, Any] =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
A : Optional[Any] =image_processing(__lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
# Initialize image_processing
A : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
A : str =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A : Tuple =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Union[str, Any] =image_processing(__lowercase , return_tensors='pt' ).pixel_values
A : str =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
# Initialize image_processing
A : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
A : Dict =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A : Any =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Optional[Any] =image_processing(__lowercase , return_tensors='pt' ).pixel_values
A : Any =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Union[str, Any]:
# prepare image and target
A : str =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : Union[str, Any] ={"""image_id""": 3_97_69, """annotations""": target}
# encode them
A : Dict =DeformableDetrImageProcessor()
A : List[Any] =image_processing(images=__lowercase , annotations=__lowercase , return_tensors='pt' )
# verify pixel values
A : Union[str, Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , __lowercase )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __lowercase , atol=1e-4 ) )
# verify area
A : Union[str, Any] =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __lowercase ) )
# verify boxes
A : Tuple =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __lowercase )
A : Optional[Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __lowercase , atol=1e-3 ) )
# verify image_id
A : List[str] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __lowercase ) )
# verify is_crowd
A : Optional[Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __lowercase ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __lowercase ) )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __lowercase ) )
# verify size
A : List[Any] =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __lowercase ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
# prepare image, target and masks_path
A : int =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : int =json.loads(f.read() )
A : Optional[int] ={"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A : Union[str, Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =DeformableDetrImageProcessor(format='coco_panoptic' )
A : Tuple =image_processing(images=__lowercase , annotations=__lowercase , masks_path=__lowercase , return_tensors='pt' )
# verify pixel values
A : Union[str, Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , __lowercase )
A : Optional[Any] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __lowercase , atol=1e-4 ) )
# verify area
A : Optional[Any] =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __lowercase ) )
# verify boxes
A : Dict =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __lowercase )
A : Tuple =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __lowercase , atol=1e-3 ) )
# verify image_id
A : Union[str, Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __lowercase ) )
# verify is_crowd
A : Union[str, Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __lowercase ) )
# verify class_labels
A : Optional[int] =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __lowercase ) )
# verify masks
A : Optional[Any] =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __lowercase )
# verify orig_size
A : str =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __lowercase ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __lowercase ) )
| 305 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
a : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 0 |
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 637 |
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : List[str] = len(__lowerCamelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__UpperCAmelCase : Union[str, Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCamelCase ):
return None
__UpperCAmelCase : str = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__UpperCAmelCase : Optional[Any] = left
__UpperCAmelCase : Tuple = point
elif point > right:
__UpperCAmelCase : Optional[Any] = right
__UpperCAmelCase : Dict = point
else:
if item < current_item:
__UpperCAmelCase : Union[str, Any] = point - 1
else:
__UpperCAmelCase : str = point + 1
return None
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Pure recursive implementation; `left` and `right` are the current search bounds."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection: list) -> bool:
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 63 | 0 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)
    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
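# A hypothetical invocation (the paths below are placeholders):
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./token_dropping_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model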
| 357 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 540 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
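# A minimal denoising-loop sketch (illustrative only; `unet` stands in for a trained
# epsilon-predicting model and is not defined in this file):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample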
| 63 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
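# A hypothetical invocation (the paths below are placeholders):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base \
#       --config_path ./config.json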
| 664 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
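# Once the package is installed, the console entry point dispatches to `main` above,
# so a registered subcommand can be run from the shell, e.g.:
#   transformers-cli env
#   transformers-cli download bert-base-uncased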
| 63 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 146 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ (lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE : List[str] = 'OwlViTImageProcessor'
SCREAMING_SNAKE_CASE : str = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : str ,lowercase__ : Dict=None ,lowercase__ : int=None ,**lowercase__ : Any ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,__lowercase ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowercase ,__lowercase )
def __call__( self : int ,lowercase__ : Optional[int]=None ,lowercase__ : int=None ,lowercase__ : Any=None ,lowercase__ : Tuple="max_length" ,lowercase__ : Any="np" ,**lowercase__ : Optional[int] ):
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__lowercase ,__lowercase ) or (isinstance(__lowercase ,__lowercase ) and not isinstance(text[0] ,__lowercase )):
__lowercase = [self.tokenizer(__lowercase ,padding=__lowercase ,return_tensors=__lowercase ,**__lowercase )]
elif isinstance(__lowercase ,__lowercase ) and isinstance(text[0] ,__lowercase ):
__lowercase = []
# Maximum number of queries across batch
__lowercase = max([len(__lowercase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowercase ) != max_num_queries:
__lowercase = t + [""" """] * (max_num_queries - len(__lowercase ))
__lowercase = self.tokenizer(__lowercase ,padding=__lowercase ,return_tensors=__lowercase ,**__lowercase )
encodings.append(__lowercase )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
__lowercase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
__lowercase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowercase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
__lowercase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__lowercase = torch.cat([encoding['''input_ids'''] for encoding in encodings] ,dim=0 )
__lowercase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] ,dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowercase = tf.stack([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
__lowercase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
__lowercase = BatchEncoding()
__lowercase = input_ids
__lowercase = attention_mask
if query_images is not None:
__lowercase = BatchEncoding()
__lowercase = self.image_processor(
__lowercase ,return_tensors=__lowercase ,**__lowercase ).pixel_values
__lowercase = query_pixel_values
if images is not None:
__lowercase = self.image_processor(__lowercase ,return_tensors=__lowercase ,**__lowercase )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) ,tensor_type=__lowercase )
def SCREAMING_SNAKE_CASE ( self : Any ,*lowercase__ : str ,**lowercase__ : Optional[int] ):
return self.image_processor.post_process(*__lowercase ,**__lowercase )
def SCREAMING_SNAKE_CASE ( self : Tuple ,*lowercase__ : Optional[Any] ,**lowercase__ : Optional[Any] ):
return self.image_processor.post_process_object_detection(*__lowercase ,**__lowercase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,*lowercase__ : int ,**lowercase__ : Optional[Any] ):
return self.image_processor.post_process_image_guided_detection(*__lowercase ,**__lowercase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : int ,**lowercase__ : List[str] ):
return self.tokenizer.batch_decode(*__lowercase ,**__lowercase )
def SCREAMING_SNAKE_CASE ( self : List[str] ,*lowercase__ : Dict ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.decode(*__lowercase ,**__lowercase )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,__lowercase ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,__lowercase ,)
return self.image_processor
| 41 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 283 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return all primes up to and including `num` using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(2)
    [2]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
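# For example, prime_sieve_eratosthenes(10) returns [2, 3, 5, 7]: the multiples of 2
# (4, 6, 8, 10) and of 3 (9) are marked composite, and everything left is prime.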
| 63 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit and return the histogram of outcomes."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 600 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Union[str, Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
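# A small usage sketch (default values only; both classes are defined above):
#   config = GitConfig()
#   print(config.vision_config.hidden_size)   # 768
#   print(config.to_dict()["model_type"])     # "git"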
| 63 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = BarthezTokenizer
a : Any = BarthezTokenizerFast
a : Union[str, Any] = True
a : Union[str, Any] = True
def UpperCAmelCase ( self : Dict ) -> Any:
super().setUp()
__UpperCAmelCase : Optional[int] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowercase )
__UpperCAmelCase : str = tokenizer
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase : Dict = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__lowercase ) , 101122 )
def UpperCAmelCase ( self : Any ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : str = [0, 57, 3018, 70307, 91, 2]
__UpperCAmelCase : List[Any] = self.tokenizer(
__lowercase , max_length=len(__lowercase ) , padding=__lowercase , truncation=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__UpperCAmelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : int = """I was born in 92000, and this is falsé."""
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = self.get_rust_tokenizer()
__UpperCAmelCase : str = tokenizer.encode(__lowercase )
__UpperCAmelCase : Tuple = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# fmt: off
__UpperCAmelCase : str = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__UpperCAmelCase : int = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__lowercase , )
| 63 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCAmelCase_ ( PretrainedConfig ):
    '''simple docstring'''

    model_type = "realm"

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13_353_718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
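As a quick sanity check, the config above can be instantiated and round-tripped — a minimal sketch assuming this is the stock RealmConfig shipped with transformers:

from transformers import RealmConfig

config = RealmConfig(num_candidates=4, reader_beam_size=3)
assert config.num_candidates == 4
print(config.to_dict()["searcher_beam_size"])  # 5000 by default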
| 568 |
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
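A worked check of the example tree (not in the original file): with height log2(8) = 3, the evaluation is max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423))) = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65, so the script prints "Optimal value : 65".

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
assert minimax(0, 0, True, scores, 3) == 65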
| 63 | 0 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
_lowercase : int ={"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_lowercase : List[Any] ="zero2"
_lowercase : Union[str, Any] ="zero3"
_lowercase : Optional[Any] =[ZEROa, ZEROa]
def A__ ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( TestCasePlus ):
'''simple docstring'''
    @parameterized.expand(params , name_func=A__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.run_and_check(
stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=A__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
self.run_and_check(
stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )
    @parameterized.expand(params , name_func=A__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
self.run_and_check(
stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=A__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
self.run_and_check(
stage=__lowercase , model=__lowercase , distributed=__lowercase , fpaa=__lowercase , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ) -> str:
A : Optional[Any] =models[model]
A : int =self.run_trainer(
stage=__lowercase , model_name=__lowercase , eval_steps=__lowercase , num_train_epochs=1 , distributed=__lowercase , fpaa=__lowercase , )
self.do_checks(__lowercase )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Dict:
A : Optional[Any] =self.get_auto_remove_tmp_dir('./xxx' , after=__lowercase )
A : List[str] =f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__lowercase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A : Tuple =f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
A : Union[str, Any] =[f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
A : List[Any] =self.get_launcher(__lowercase )
A : Tuple =launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowercase , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Tuple:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
A : Optional[int] =min(2 , get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
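Illustration of the custom name_func wiring above (assumes the parameterized package): to_safe_name sanitizes the joined parameter values, so each sub-test is labeled with both the ZeRO stage and the model key.

from parameterized import parameterized

args = ("zero3", "base")
print(parameterized.to_safe_name("_".join(str(x) for x in args)))  # zero3_base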
| 305 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a ( PretrainedConfig ):
    """simple docstring"""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
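A small sketch of what the attribute_map above buys (assuming this is the stock OpenAIGPTConfig from transformers): canonical names resolve to the GPT-specific fields, so generic code can read hidden_size without knowing it is stored as n_embd.

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=768, n_layer=12)
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12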
| 63 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A search-tree node: position, goal, path cost so far, and parent."""

    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , g_cost: float , parent: Node | None , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic( self ) -> float:
        """Heuristic: Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self , other ) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search: always expand the open node with the lowest f_cost."""

    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search( self ) -> Path | None:
        """Search until the target is reached or the open list is exhausted."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors( self , parent: Node ) -> list[Node]:
        """Returns the in-bounds, obstacle-free neighbours of a node on the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node: Node | None ) -> Path:
        """Retrace the path from the node back through its parents to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
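The open_nodes.sort() + pop(0) frontier above costs O(n log n) per expansion; a binary heap gives the same "cheapest f_cost first" discipline in O(log n). A minimal sketch with heapq (an alternative, not what the class above uses):

import heapq

frontier: list[tuple[int, tuple[int, int]]] = []
heapq.heappush(frontier, (4, (0, 0)))  # (f_cost, position)
heapq.heappush(frontier, (2, (1, 0)))
heapq.heappush(frontier, (7, (0, 1)))
print(heapq.heappop(frontier))  # (2, (1, 0)) -- always the lowest f_cost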
| 637 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyV22InpaintPipeline
a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
a : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
a : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
a : List[Any] = False
@property
    def text_embedder_hidden_size( self ):
return 32
@property
    def time_input_dim( self ):
return 32
@property
    def block_out_channels_a( self ):
return self.time_input_dim
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
return 100
@property
    def dummy_unet( self ):
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create mask (masked region reconstructed from the upstream test)
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_inpaint( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0  # masked region reconstructed from the upstream test
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_embeds , negative_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
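For orientation, the two-stage flow the slow test drives looks roughly like this outside the test harness — a sketch using only the checkpoints and URLs named in the test (requires a GPU and network access):

import numpy as np
import torch
from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22InpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
image_embeds, negative_image_embeds = prior("a hat", negative_prompt="").to_tuple()
image = decoder(
    image=init_image,
    mask_image=np.ones((768, 768), dtype=np.float32),
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
).images[0]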
| 63 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
    dataset = Dataset.from_dict(data_dict )
return dataset
class a__ ( TestCase ):
    def test_make_duplicate_clusters( self ):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
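The helpers under test come from minhash_deduplication; conceptually, near-duplicate detection rests on MinHash Jaccard estimates like the one sketched below (illustration with the datasketch library, which may or may not be what the module uses internally):

from datasketch import MinHash

def jaccard_estimate(tokens_a, tokens_b, num_perm=128):
    ma, mb = MinHash(num_perm=num_perm), MinHash(num_perm=num_perm)
    for t in tokens_a:
        ma.update(t.encode("utf-8"))
    for t in tokens_b:
        mb.update(t.encode("utf-8"))
    return ma.jaccard(mb)

# "a " * 20 and "a " * 30 share one token set, so the estimate is ~1.0,
# which is why the fixture's first two files land in one duplicate cluster
print(jaccard_estimate(("a " * 20).split(), ("a " * 30).split()))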
| 357 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def lowerCamelCase__ ( objs=OBJECTS , attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
    r = OrderedDict()
    with open(__lowerCamelCase , "rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( in_tensor ):
    out_tensor = torch.load("dump.pt" , map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape , n1[0, 0, :5] )
    print(n2.shape , n2[0, 0, :5] )
    assert np.allclose(n1 , n2 , rtol=0.0_1 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(n1 , n2 , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(n1.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int=True ):
__UpperCAmelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[int] = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def url_to_filename( url , etag=None ):
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def lowerCamelCase__ ( query , delim="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data )
            except Exception:
                data = data.split("\n" )
            req.close()
    return data
def get_image_from_url( url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def lowerCamelCase__ ( url: str ):
    fn = url.split("/" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , "rb" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("model" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("running_var" , "num_batches_tracked" )
            new[k2] = zero
    return new
def lowerCamelCase__ ( ):
print(f"""{os.path.abspath(os.path.join(__lowerCamelCase , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( im , input_format="RGB" ):
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f"""could not connect to: {im}"""
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase__ ( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
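A short sketch of how the nested Config class above behaves once repaired (plain dicts in, attribute access out):

cfg = Config({"model": {"hidden_size": 768}, "input_format": "RGB"})
print(cfg.model.hidden_size)  # 768 -- nested dicts become nested Config objects
print(cfg)                    # __repr__ lists the top-level keys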
| 63 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCamelCase)
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(4_2)
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=8_0)
    dataloader = DataLoader(dset , batch_size=1_6)
model.to(accelerator.device)
if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3)
        sched = LambdaLR(opt , lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch: epoch**0.65)
# Make a copy of `model`
if sched:
a_ : Optional[Any] = accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
else:
a_ : Tuple = accelerator.prepare(__lowerCamelCase , __lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
a_ : Tuple = accelerator.gather((ddp_input, ddp_target))
a_ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
else:
# Sync grads
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration)
a_ : Optional[Any] = ddp_input[torch.randperm(len(__lowerCamelCase))]
def test_distributed_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
a_ : Tuple = accelerator.gather((ddp_input, ddp_target))
a_ : Tuple = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
else:
# Sync grads
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration)
a_ : str = ddp_input[torch.randperm(len(__lowerCamelCase))]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
for iteration, batch in enumerate(__lowerCamelCase):
a_ : List[str] = batch.values()
# Gather the distributed inputs and targs for the base model
a_ : List[str] = accelerator.gather((ddp_input, ddp_target))
a_ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCamelCase):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration)
a_ : Dict = ddp_input[torch.randperm(len(__lowerCamelCase))]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
for iteration, batch in enumerate(__lowerCamelCase):
a_ : str = batch.values()
# Gather the distributed inputs and targs for the base model
a_ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
a_ : Tuple = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCamelCase):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
a_ : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration)
GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=8_0)
    first_dataloader = DataLoader(first_dset , batch_size=1_6)
    second_dset = RegressionDataset(length=9_6)
    second_dataloader = DataLoader(second_dset , batch_size=1_6)
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(__lowerCamelCase)
if iteration < len(__lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(__lowerCamelCase)
if batch_num < len(__lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""")
test_noop_sync(__lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""")
test_distributed_sync(__lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__lowerCamelCase , __lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase , __lowerCamelCase)
def _mp_fn( index ):
    # For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
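The canonical pattern these tests verify, as a self-contained sketch (standard accelerate API; not part of the original file):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = DataLoader(TensorDataset(torch.randn(16, 1), torch.randn(16, 1)), batch_size=4)
model, optimizer, data = accelerator.prepare(model, optimizer, data)
for x, y in data:
    with accelerator.accumulate(model):  # gradients sync only on every 2nd batch
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()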
| 540 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Tuple = num_choices
    def prepare_config_and_inputs( self ):
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_attention_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RetriBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
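
if __name__ == "__main__":
    # Minimal usage sketch, assuming network access to the
    # "yjernite/retribert-base-uncased" checkpoint listed in the maps above.
    # Pair inputs get the [CLS] question [SEP] passage [SEP] layout produced by
    # build_inputs_with_special_tokens; token_type_ids are omitted because
    # model_input_names only lists input_ids and attention_mask.
    tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    encoded = tokenizer("who wrote hamlet?", "hamlet was written by shakespeare")
    print(encoded["input_ids"])
    print(encoded["attention_mask"])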
| 664 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    # step_rules look like "1:10,0.1:20,0.01:55,0.005"; the trailing value is the
    # multiplier used after the last step boundary.
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
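
if __name__ == "__main__":
    # Minimal usage sketch with illustrative (not default) step counts: the
    # learning rate warms up linearly for 10 steps, then follows a cosine decay
    # toward zero over the remaining 90 steps.
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        optimizer.step()
        scheduler.step()
    print(f"final lr: {scheduler.get_last_lr()[0]:.2e}")  # approximately 0 after the full decay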
| 63 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
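
if __name__ == "__main__":
    # Minimal usage sketch, assuming hub access: the mapping tables above route a
    # BERT checkpoint to FlaxBertModel behind the generic FlaxAutoModel entry point.
    import numpy as np

    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    outputs = model(np.ones((1, 4), dtype="i4"))
    print(outputs.last_hidden_state.shape)  # (1, 4, 768)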
| 146 |
from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute gamma(num) for a positive integer or half-integer, using the
    recurrence gamma(num) == (num - 1) * gamma(num - 1) with gamma(1) == 1
    and gamma(0.5) == sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
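

def test_gamma_recurrence() -> None:
    # Worked examples of the recurrence gamma(num) == (num - 1) * gamma(num - 1):
    # integers reduce to factorials, half-integers bottom out at gamma(0.5) == sqrt(pi).
    assert gamma(4) == 6.0  # 3 * 2 * 1 == 3!
    assert abs(gamma(3.5) - 2.5 * 1.5 * 0.5 * sqrt(pi)) < 1e-12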
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:
            print(f"gamma({num}) = {gamma(num)}")
            print("\nEnter 0 to exit...")
| 63 | 0 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value,
        force=force, lowercase=lowercase, use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] ,reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] ,)
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
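
if __name__ == "__main__":
    # Minimal end-to-end sketch of the three scores on the docstring example;
    # exercises the pure functions above without going through datasets.load_metric.
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print("sari:", compute_sari(sources=sources, predictions=predictions, references=references))
    print("sacrebleu:", compute_sacrebleu(predictions=predictions, references=references))
    print("exact:", compute_em(predictions=predictions, references=references))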
| 41 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    """Build the fixture tree: 1 at the root, 2 and 3 below it, 4 and 5 under 2."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[Any]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[Any]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[Any]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[Any]:
    """Alternate left-to-right and right-to-left orders, one level at a time."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
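

def test_traversals() -> None:
    # Spot checks against the fixture from make_tree(): 1 at the root,
    # 2 and 3 below it, 4 and 5 under 2.
    root = make_tree()
    assert preorder(root) == [1, 2, 4, 5, 3]
    assert inorder(root) == [4, 2, 5, 1, 3]
    assert postorder(root) == [4, 5, 2, 3, 1]
    assert level_order(root) == [1, 2, 3, 4, 5]
    assert zigzag(root) == [[1], [3, 2], [4, 5]]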
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 283 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def UpperCAmelCase ( self : Tuple , **__lowercase : int ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self : str , __lowercase : Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
__UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : int = self.get_input_output_texts(__lowercase )
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__UpperCAmelCase : Dict = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
| 63 | 0 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
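

def test_solution() -> None:
    # Hand-checkable cases: 2**4 == 16 -> 1 + 6 == 7, and
    # 2**15 == 32768 -> 3 + 2 + 7 + 6 + 8 == 26.
    assert solution(4) == 7
    assert solution(15) == 26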
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 600 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__(self, **kwargs):
        """Fold deprecated `no_*` arguments into their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
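
if __name__ == "__main__":
    # Minimal construction sketch; the keyword arguments are illustrative and
    # assume the usual BenchmarkArguments fields (models, batch_sizes,
    # sequence_lengths). Deprecated "no_*" flags from older CLIs are folded into
    # their positive counterparts by __init__ above.
    args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
    print(args.device, args.n_gpu)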
| 63 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = BlipImageProcessor()
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
SCREAMING_SNAKE_CASE__ = BlipProcessor(__lowercase , __lowercase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self :int , **__A :Optional[Any] ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).tokenizer
def _snake_case ( self :Optional[Any] , **__A :int ) -> str:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def _snake_case ( self :int ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self :Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"]) | 6 |
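A minimal end-to-end usage sketch for the processor exercised above; the checkpoint name is an assumption (any BLIP checkpoint should behave the same), and this is illustrative, not part of the test suite.

# Hedged sketch: BlipProcessor bundles a tokenizer and an image processor behind one call.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")  # assumed checkpoint
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']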
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 | 0 |
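The tests above drive the internal reader/writer classes; the public equivalents are Dataset.to_sql and Dataset.from_sql. A minimal round-trip sketch (note: it writes example.db into the working directory):

# Hedged sketch of the public API backed by SqlDatasetReader / SqlDatasetWriter.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_sql("dataset", "sqlite:///example.db")  # write a table named "dataset"
round_trip = Dataset.from_sql("dataset", "sqlite:///example.db")
assert round_trip.column_names == ["col_1", "col_2", "col_3"]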
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for the Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Return all schedulers that are compatible with this scheduler's config."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Discretize the cosine alpha_bar function into a beta schedule, capping each beta at max_beta."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 568 |
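A dependency-free sketch of the "squaredcos_cap_v2" (Glide cosine) schedule implemented by betas_for_alpha_bar above, useful for inspecting the numbers without jax installed:

# beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), capped at max_beta.
import math

def alpha_bar(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

betas = cosine_betas(1000)
print(betas[0], betas[-1])  # tiny at t=0, approaching the 0.999 cap at t=1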
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 |
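The init above defers heavy framework imports until an attribute is first accessed. A stand-alone sketch of the same idea using PEP 562 module-level __getattr__ (a simplification, not transformers' actual _LazyModule implementation):

# Save as lazy_demo.py; `import lazy_demo; lazy_demo.dumps({"a": 1})` triggers the real import.
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # module name -> exported names (toy mapping)
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    module = _name_to_module.get(name)
    if module is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # The real import happens only on first attribute access.
    return getattr(importlib.import_module(module), name)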
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI parameter name onto the Hugging Face Blenderbot name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint, remap its keys and save a Hugging Face checkpoint."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 | 0 |
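A quick sanity check of what rename_state_dict_key produces, derived from the PATTERNS table above (assumes the script above is importable; these asserts are not part of the original file):

assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert rename_state_dict_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"
assert rename_state_dict_key("decoder.norm3.bias") == "decoder.final_layer_norm.bias"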
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 637 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`, probing at an interpolated position."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive variant; `left` and `right` must initially be 0 and len(sorted_collection) - 1."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]

    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 63 | 0 |
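A worked example of the probe position that both functions above compute:

# point = left + (item - a[left]) * (right - left) // (a[right] - a[left])
a = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 66, 0, len(a) - 1
point = left + ((item - a[left]) * (right - left)) // (a[right] - a[left])
print(point, a[point])  # 4 50 -> item > a[point], so the search continues with left = 5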
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 357 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences with nltk and rejoin them newline-separated (as ROUGE expects)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 540 |
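Usage sketch for the splitter above (requires nltk and its punkt data; expected output shown in the comments):

text = "<n>Pegasus emits one long line. Sentences are not separated."
print(add_newline_to_end_of_each_sentence(text))
# Pegasus emits one long line.
# Sentences are not separated.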
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep scheduler (https://arxiv.org/pdf/2202.09778.pdf)."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth) combination of the running ets values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 | 0 |
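The ets branches in step() above are explicit Adams-Bashforth formulas of order 1 to 4; every weight set sums to 1, which is a quick consistency check:

# Adams-Bashforth weights used by the scheduler, per history length.
for weights in ([1.0], [3 / 2, -1 / 2], [23 / 12, -16 / 12, 5 / 12], [55 / 24, -59 / 24, 37 / 24, -9 / 24]):
    assert abs(sum(weights) - 1.0) < 1e-12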
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI parameter name onto the Hugging Face Blenderbot name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint, remap its keys and save a Hugging Face checkpoint."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 664 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 63 | 0 |
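A minimal, self-contained sketch of the subcommand pattern main() relies on: each command registers a subparser and binds a callable via set_defaults(func=...):

import argparse

def register_echo(subparsers):
    p = subparsers.add_parser("echo", help="print a message")
    p.add_argument("message")
    p.set_defaults(func=lambda args: print(args.message))

parser = argparse.ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
register_echo(parser.add_subparsers(help="demo-cli command helpers"))
args = parser.parse_args(["echo", "hello"])
args.func(args)  # prints "hello"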
from __future__ import annotations
_UpperCamelCase = "Muhammad Umer Farooq"
_UpperCamelCase = "MIT"
_UpperCamelCase = "1.0.0"
_UpperCamelCase = "Muhammad Umer Farooq"
_UpperCamelCase = "contact@muhammadumerfarooq.me"
_UpperCamelCase = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Standard HTMLParser hook: collect absolute hrefs from every <a> tag."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 146 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
SCREAMING_SNAKE_CASE :Optional[Any] = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
SCREAMING_SNAKE_CASE :int = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # map latents from [-1, 1] to uint8-range [0, 255], channels-last, for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        # map back to [-1, 1]
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 283 |
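apply_watermark works in uint8 space: latents in [-1, 1] are mapped to [0, 255], encoded, and mapped back. A torch-only check of that round-trip (rounding is added here to mimic the uint8 cast; it is not in the class above):

import torch

images = torch.rand(1, 3, 256, 256) * 2 - 1                # fake batch in [-1, 1]
as_uint8 = (255 * (images / 2 + 0.5)).round()              # -> [0, 255]
restored = torch.clamp(2 * (as_uint8 / 255 - 0.5), min=-1.0, max=1.0)
assert torch.allclose(images, restored, atol=1 / 255)      # only quantization error remains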
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including `num` using the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 63 | 0 |
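Quick check of the sieve above (assumes the module is importable):

assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]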
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""Build the example tree:
          1
         / \
        2   3
       / \
      4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root -> Left -> Right."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left -> Right -> Root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left -> Root -> Right."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of levels in the tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of a single level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of a single level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the direction on every level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()

    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 600 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 0 |
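Since GitConfig builds a GitVisionConfig from a plain dict, a nested config can be constructed directly; the field values here are illustrative, not recommendations:

from transformers import GitConfig

config = GitConfig(vision_config={"hidden_size": 512, "num_hidden_layers": 6})
print(config.vision_config.hidden_size)  # 512
print(config.to_dict()["model_type"])    # git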
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation DataLoaders for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 6 |
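A stand-alone sketch of the decorator's contract: find_executable_batch_size retries the wrapped function, halving batch_size whenever it sees an out-of-memory style error (the fake RuntimeError below imitates a CUDA OOM; no GPU is needed to run this):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 32:  # pretend anything above 32 runs out of memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 32, after two automatic halvings (128 -> 64 -> 32)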
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 63 | 0 |
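A quick round trip with the checkpoint and sentence used by the test above, as a standalone sketch:

from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
ids = tokenizer.encode("A long paragraph for summarization.")
print(ids)  # starts with <s> (0) and ends with </s> (2), matching expected_src_tokens above
print(tokenizer.decode(ids, skip_special_tokens=True))  # round-trips back to the input sentence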
INSTALL_CONTENT = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment out the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 568 |
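A plausible way such a placeholder mapping is consumed when rendering doc notebooks; the fill_placeholders helper below is a sketch, not the library's actual function:

def fill_placeholders(code: str, mapping: dict) -> str:
    # Replace each "{processor_class}"-style placeholder with its configured value.
    for placeholder, value in mapping.items():
        code = code.replace(placeholder, value)
    return code

print(fill_placeholders("processor = {processor_class}.from_pretrained(ckpt)",
                        {"{processor_class}": "FakeProcessorClass"}))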
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal play for the maximizing player on a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 63 | 0 |
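Tracing main() by hand shows where the printed value comes from; the assertion below restates the trace in code:

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
# depth 3 (leaves):                    the eight scores above
# depth 2 (maximizer): pairwise max -> [90, 33, 65, 34423]
# depth 1 (minimizer): pairwise min -> [33, 65]
# depth 0 (maximizer): max(33, 65)  -> 65
assert minimax(0, 0, True, scores, math.log(len(scores), 2)) == 65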
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 305 |
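Outside the unit test, the tokenizer is a plain character-level vocabulary. A sketch against the public MGP-STR checkpoint; the hub id alibaba-damo/mgp-str-base is an assumption, not taken from the test above:

from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")  # assumed hub id
ids = tokenizer("tester")["input_ids"]  # one id per lower-cased character
print("".join(tokenizer.convert_ids_to_tokens(ids)))  # -> "tester"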
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 63 | 0 |
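The attribute_map above lets the canonical config attribute names resolve to the GPT-specific ones; a quick demonstration:

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
assert config.hidden_size == config.n_embd == 768  # "hidden_size" resolves through attribute_map
assert config.num_hidden_layers == config.n_layer == 12
assert config.max_position_embeddings == config.n_positions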
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the index of the parent of a binary-heap node."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Min-heap priority queue keyed by weight, with position tracking so keys can be updated."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Swap the root with the last node, pop it, then restore the heap property.
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Move the node up while it is lighter than its parent.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Move the node down while one of its children is lighter.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm and return the distance and parent maps of the MST."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 637 |
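A small end-to-end check of the structures above on a triangle graph; the expected parent map follows from the edge weights (node 1 happens to be extracted first given the push order):

graph = GraphUndirectedWeighted()
graph.add_edge(1, 2, 3)
graph.add_edge(2, 3, 1)
graph.add_edge(1, 3, 7)
dist, parent = prims_algo(graph)
print(parent)  # {1: None, 2: 1, 3: 2}; the heavy edge 1-3 (weight 7) is excluded from the tree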
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 63 | 0 |
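The dummy-input helper above encodes a reproducibility pattern worth isolating: seed a generator per device, falling back to the global seed on MPS, which does not support device-bound generators. A standalone sketch:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # MPS: fall back to the global (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)

generator = make_generator("cpu", seed=0)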
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # the second instance must be a fresh object, unaffected by the first
        with self.assertRaises(AttributeError):
            _ = act2.a
| 357 |
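For reference, the tanh approximation that makes gelu_new differ from the exact gelu checked above; this is the standard formula, reproduced as a sketch:

import math
import torch

def gelu_tanh_approx(x: torch.Tensor) -> torch.Tensor:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

x = torch.linspace(-3, 3, 7)
print((gelu_tanh_approx(x) - torch.nn.functional.gelu(x)).abs().max())  # small but nonzero difference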
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a : List[Any] = True
except ImportError:
a : str = False
try:
from torch.hub import _get_torch_home
a : List[Any] = _get_torch_home()
except ImportError:
a : int = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def url_to_filename(url, etag=None):
    # Cache filenames are the SHA-256 of the URL, plus the SHA-256 of the ETag when available.
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 63 | 0 |
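The caching helpers above derive cache filenames from the URL and its ETag with SHA-256; the scheme is easy to reproduce in isolation (the sample URL and etag below are made up):

from hashlib import sha256

def cache_filename(url, etag=None):
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    return filename

print(cache_filename("https://cdn.huggingface.co/demo/pytorch_model.bin", etag='"abc123"'))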
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function inside a notebook, on TPU, GPU(s) or CPU."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function over several CPU processes, for debugging distributed code."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 540 |
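Typical use from a notebook; the training function and its arguments here are placeholders:

from accelerate import notebook_launcher

def training_function(config, args=None):
    ...  # build the Accelerator, model and dataloaders inside this function

notebook_launcher(training_function, args=({"lr": 2e-5}, None), num_processes=2, mixed_precision="fp16")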
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 63 | 0 |
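The slow test above exercises PyTorch-to-Flax weight conversion; the same call works standalone (checkpoint name taken from the test, output shape assumes the base 768-dim model):

import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)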
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__(self, **kwargs):
        # Remap deprecated "no_*" kwargs onto their positive counterparts before the dataclass init runs.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
@cached_property
def __A ( self : int ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["tf"] )
__magic_name__ = None
if self.tpu:
try:
if self.tpu_name:
__magic_name__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__magic_name__ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__magic_name__ = None
return tpu
@cached_property
def __A ( self : List[str] ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__magic_name__ = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
__magic_name__ = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
__magic_name__ = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def __A ( self : Optional[Any] ) -> bool:
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def __A ( self : Dict ) -> "tf.distribute.Strategy":
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def __A ( self : Any ) -> int:
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def __A ( self : List[Any] ) -> int:
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __A ( self : str ) -> bool:
return self.n_gpu > 0
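
# A minimal sketch of constructing these arguments programmatically -- `models`,
# `batch_sizes`, and `sequence_lengths` are fields inherited from BenchmarkArguments;
# TensorFlow itself is only required once the device properties above are touched.
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32], eager_mode=True
    )
    print(args.eager_mode, args.device_idx)
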
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule parsed from a rule string like "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 over `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the schedule named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
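
# A minimal usage sketch of `get_scheduler` (not part of the module above): 10 warmup
# steps followed by a linear decay to zero over the remaining steps.
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer_demo = torch.optim.SGD(params, lr=0.1)
    lr_scheduler = get_scheduler("linear", optimizer_demo, num_warmup_steps=10, num_training_steps=100)
    for _ in range(3):
        optimizer_demo.step()
        lr_scheduler.step()
    print(lr_scheduler.get_last_lr())  # learning rate still ramping up during warmup
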
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ exists for legacy code. When the deprecated args are removed
        completely, it can simply be deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                # Translate the negative legacy flag into the positive field and keep it
                # in kwargs so the dataclass __init__ below picks it up.
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
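
# Sketch of the legacy-argument remapping in `__init__` above: the deprecated negative
# flag `no_cuda=True` is translated into the positive `cuda=False` field (assumes the
# fields inherited from BenchmarkArguments).
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32], no_cuda=True
    )
    print(args.cuda)  # False -- the deprecated flag was flipped and forwarded
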
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus:
    v = sqrt(K / rho).

    >>> speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.25e9)
    1500.0
    >>> speed_of_sound_in_a_fluid(density=-1000, bulk_modulus=2.25e9)
    Traceback (most recent call last):
        ...
    ValueError: Impossible fluid density
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the random numbers drawn along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Simple model to fit y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state: with total_limit=1 only one checkpoint may remain
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states: with total_limit=2 only the last two should survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
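
# Appendix-style sketch of the core API the tests above exercise -- one save/load
# round-trip through `project_dir/checkpoints/checkpoint_0` on a single process.
def _checkpoint_roundtrip_demo(tmpdir: str):
    accelerator = Accelerator(
        project_dir=tmpdir, project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
    )
    model = accelerator.prepare(DummyModel())
    accelerator.save_state()  # writes model weights and RNG state (no optimizer prepared here)
    accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
    return model
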
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
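
# Illustrative consumer of the lazy module above -- a sketch; the top-level import is
# cheap, and the heavy modeling code only loads on first attribute access through
# `_LazyModule.__getattr__` (assumes torch and network access to the checkpoint).
if __name__ == "__main__":
    from transformers import Blip2Processor  # resolved lazily via _import_structure

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    print(type(processor).__name__)
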
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                output = tokenizer.convert_tokens_to_string(string)
                self.assertIsInstance(output, str)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__ via the instance __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
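
# A short sketch of `align_with_features` in use -- the column names below are made up
# for the example; the template copies the dataset's real ClassLabel into its schema.
if __name__ == "__main__":
    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    template = TextClassification(text_column="text", label_column="labels")
    aligned = template.align_with_features(features)
    print(aligned.label_schema["labels"].names)  # ['neg', 'pos']
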
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
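
# Hedged usage sketch of the processor under test -- a random PIL image pushed through
# the resize / center-crop / normalize pipeline (assumes torch and the vision extras).
if __name__ == "__main__":
    image = Image.fromarray(np.uint8(np.random.rand(64, 48, 3) * 255))
    image_processor = PoolFormerImageProcessor()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # (1, 3, H, W); H and W come from the default crop_size
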
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
def solution(n: int = 1000) -> int:
    """
    Return the product a * b * c of the Pythagorean triplet (a, b, c) with a + b + c = n
    (Project Euler problem 9 for n = 1000).

    >>> solution(12)
    60
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
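
# Brute-force cross-check of the closed-form search above -- a sketch for small n:
# enumerate all (a, b) pairs with a <= b and keep the best product over valid triplets.
def brute_force_solution(n: int = 1000) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


if __name__ == "__main__":
    assert brute_force_solution(12) == solution(12) == 60  # the 3-4-5 triangle
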
| 64 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **lowerCAmelCase ) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:]
setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript )
SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**lowerCAmelCase )
__a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} )
__a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
__a = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
elif is_torch_tpu_available():
SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device()
SCREAMING_SNAKE_CASE__: Any= 0
else:
SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count()
return device, n_gpu
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return is_torch_tpu_available() and self.tpu
@property
def UpperCamelCase_ ( self ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCamelCase_ ( self ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def UpperCamelCase_ ( self ) -> int:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def UpperCamelCase_ ( self ) -> str:
return self.n_gpu > 0
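# Illustrative sketch (added): the deprecated-flag rewrite performed in __init__
# above turns e.g. no_cuda=True into cuda=False before the dataclass fields are
# read; only the "no_"-prefixed spellings are rewritten. Values here are made up.
_deprecated_kwargs = {"no_cuda": True}
_rewritten = {k[3:]: not v for k, v in _deprecated_kwargs.items() if k.startswith("no_")}
assert _rewritten == {"cuda": False}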
| 64 | 1 |
import enum
import shutil
import sys
lowercase_ , lowercase_ : Optional[int] = shutil.get_terminal_size()
lowercase_ : Union[str, Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class _lowerCamelCase ( enum.Enum ):
__a = 0
__a = 1
def A__ ( snake_case_ : Dict , snake_case_ : Any="" ):
sys.stdout.write(str(snake_case_ ) + end )
sys.stdout.flush()
def A__ ( snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Dict="" ):
forceWrite(F'\u001b[{color}m{content}\u001b[0m' , snake_case_ )
def A__ ( ):
forceWrite('''\r''' )
def A__ ( snake_case_ : int , snake_case_ : str ):
forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def A__ ( ):
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def A__ ( ):
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH )
| 64 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: List[str]= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: int= min_resolution
SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
SCREAMING_SNAKE_CASE__: Union[str, Any]= size
SCREAMING_SNAKE_CASE__: Dict= crop_pct
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
SCREAMING_SNAKE_CASE__: Dict= do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std
def UpperCamelCase_ ( self ) -> Tuple:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase_ ( self ) -> Tuple:
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test non-batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test non-batched input
SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test non-batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 64 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowercase_ : str = re.compile(R'\s+')
def A__ ( snake_case_ : List[str] ):
return {"hash": hashlib.mda(re.sub(snake_case_ , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def A__ ( snake_case_ : Optional[int] ):
SCREAMING_SNAKE_CASE__: Any= [len(snake_case_ ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(snake_case_ ), "line_max": max(snake_case_ )}
def A__ ( snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: Optional[int]= np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def A__ ( snake_case_ : List[str] , snake_case_ : str ):
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def A__ ( snake_case_ : Union[str, Any] , snake_case_ : Dict=5 ):
SCREAMING_SNAKE_CASE__: Tuple= ['''auto-generated''', '''autogenerated''', '''automatically generated''']
SCREAMING_SNAKE_CASE__: Any= example['''content'''].splitlines()
for _, line in zip(range(snake_case_ ) , snake_case_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A__ ( snake_case_ : Dict , snake_case_ : int=5 , snake_case_ : Tuple=0.05 ):
SCREAMING_SNAKE_CASE__: List[Any]= ['''unit tests''', '''test file''', '''configuration file''']
SCREAMING_SNAKE_CASE__: Optional[int]= example['''content'''].splitlines()
SCREAMING_SNAKE_CASE__: Tuple= 0
SCREAMING_SNAKE_CASE__: Tuple= 0
# first test
for _, line in zip(range(snake_case_ ) , snake_case_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
SCREAMING_SNAKE_CASE__: Union[str, Any]= example['''content'''].count('''\n''' )
SCREAMING_SNAKE_CASE__: Tuple= int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A__ ( snake_case_ : Dict ):
SCREAMING_SNAKE_CASE__: Dict= ['''def ''', '''class ''', '''for ''', '''while ''']
SCREAMING_SNAKE_CASE__: str= example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A__ ( snake_case_ : Optional[Any] , snake_case_ : int=4 ):
SCREAMING_SNAKE_CASE__: Optional[int]= example['''content'''].splitlines()
SCREAMING_SNAKE_CASE__: List[Any]= 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A__ ( snake_case_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer(example['''content'''] , truncation=snake_case_ )['''input_ids''']
SCREAMING_SNAKE_CASE__: Dict= len(example['''content'''] ) / len(snake_case_ )
return {"ratio": ratio}
def A__ ( snake_case_ : str ):
SCREAMING_SNAKE_CASE__: Optional[Any]= {}
results.update(get_hash(snake_case_ ) )
results.update(line_stats(snake_case_ ) )
results.update(alpha_stats(snake_case_ ) )
results.update(char_token_ratio(snake_case_ ) )
results.update(is_autogenerated(snake_case_ ) )
results.update(is_config_or_test(snake_case_ ) )
results.update(has_no_keywords(snake_case_ ) )
results.update(has_few_assignments(snake_case_ ) )
return results
def A__ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int ):
if not check_uniques(snake_case_ , snake_case_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A__ ( snake_case_ : Union[str, Any] ):
with open(snake_case_ , '''rb''' ) as f_in:
with gzip.open(str(snake_case_ ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out:
shutil.copyfileobj(snake_case_ , snake_case_ )
os.unlink(snake_case_ )
# Settings
lowercase_ : Dict = HfArgumentParser(PreprocessingArguments)
lowercase_ : Union[str, Any] = parser.parse_args()
if args.num_workers is None:
lowercase_ : List[str] = multiprocessing.cpu_count()
lowercase_ : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowercase_ : Optional[int] = time.time()
lowercase_ : Tuple = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
lowercase_ : List[Any] = time.time()
lowercase_ : Dict = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
lowercase_ : Optional[int] = set(ds.unique('hash'))
lowercase_ : Optional[int] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
lowercase_ : List[Any] = time.time()
lowercase_ : List[Any] = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowercase_ : str = time.time()
lowercase_ , lowercase_ : Optional[int] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
lowercase_ : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
lowercase_ : Optional[int] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
lowercase_ : List[str] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowercase_ : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''')
lowercase_ : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 64 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
print('''Generating primitive root of p''' )
while True:
SCREAMING_SNAKE_CASE__: List[Any]= random.randrange(3 , snake_case_ )
if pow(snake_case_ , 2 , snake_case_ ) == 1:
continue
if pow(snake_case_ , snake_case_ , snake_case_ ) == 1:
continue
return g
def A__ ( snake_case_ : int ):
print('''Generating prime p...''' )
SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p.
SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p)
SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d)
return public_key, private_key
def A__ ( snake_case_ : str , snake_case_ : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print('''\nWARNING:''' )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , '''w''' ) as fo:
fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
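# Illustrative check (added) of the key relation used above, on toy numbers
# (p = 23, g = 5, d = 6, all hypothetical): the second public component is the
# modular inverse of g**d mod p, so their product is 1 modulo p.
_p, _g, _d = 23, 5, 6
_e2 = pow(pow(_g, _d, _p), -1, _p)  # modular inverse (Python 3.8+)
assert (_e2 * pow(_g, _d, _p)) % _p == 1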
| 64 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Any= data
SCREAMING_SNAKE_CASE__: Node | None= None
class _lowerCamelCase :
def __init__( self ) -> Any:
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: Any= None
def __iter__( self ) -> Iterator[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= self.head
while self.head:
yield node.data
SCREAMING_SNAKE_CASE__: Optional[int]= node.next
if node == self.head:
break
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> List[Any]:
return "->".join(str(lowerCAmelCase ) for item in iter(self ) )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
self.insert_nth(len(self ) , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
self.insert_nth(0 , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= Node(lowerCAmelCase )
if self.head is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= new_node # the first node points to itself
SCREAMING_SNAKE_CASE__: Optional[int]= new_node
elif index == 0: # insert at head
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.head
SCREAMING_SNAKE_CASE__: Any= new_node
else:
SCREAMING_SNAKE_CASE__: Dict= self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= temp.next
SCREAMING_SNAKE_CASE__: Optional[Any]= temp.next
SCREAMING_SNAKE_CASE__: List[Any]= new_node
if index == len(self ) - 1: # insert at tail
SCREAMING_SNAKE_CASE__: Tuple= new_node
def UpperCamelCase_ ( self ) -> Optional[Any]:
return self.delete_nth(0 )
def UpperCamelCase_ ( self ) -> Any:
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self , lowerCAmelCase = 0 ) -> Any:
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
SCREAMING_SNAKE_CASE__: Any= self.head
if self.head == self.tail: # just one node
SCREAMING_SNAKE_CASE__: str= None
elif index == 0: # delete head node
SCREAMING_SNAKE_CASE__: int= self.tail.next.next
SCREAMING_SNAKE_CASE__: Dict= self.head.next
else:
SCREAMING_SNAKE_CASE__: Dict= self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE__: Any= temp.next
SCREAMING_SNAKE_CASE__: List[str]= temp.next
SCREAMING_SNAKE_CASE__: int= temp.next.next
if index == len(self ) - 1: # delete at tail
SCREAMING_SNAKE_CASE__: List[str]= temp
return delete_node.data
def UpperCamelCase_ ( self ) -> bool:
return len(self ) == 0
def A__ ( ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= CircularLinkedList()
assert len(snake_case_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(snake_case_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(snake_case_ ) == i
circular_linked_list.insert_nth(snake_case_ , i + 1 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | from math import factorial
def A__ ( snake_case_ : int , snake_case_ : int ):
    # If either condition is true, the function is being asked
    # to calculate the factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
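# Illustrative check (added): C(52, 5) = 2,598,960, matching the first print
# above; computed iteratively here to avoid large intermediate factorials.
def _n_choose_k(n: int, k: int) -> int:
    result = 1
    for i in range(1, k + 1):
        result = result * (n - k + i) // i  # each prefix is itself a binomial
    return result

assert _n_choose_k(52, 5) == 2_598_960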
| 64 | 1 |
def A__ ( snake_case_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
SCREAMING_SNAKE_CASE__: int= len(snake_case_ )
for i in range(n - 1 ):
for j in range(i + 1 , snake_case_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def A__ ( snake_case_ : List[Any] ):
if len(snake_case_ ) <= 1:
return arr, 0
SCREAMING_SNAKE_CASE__: List[str]= len(snake_case_ ) // 2
SCREAMING_SNAKE_CASE__: Dict= arr[0:mid]
SCREAMING_SNAKE_CASE__: Optional[int]= arr[mid:]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= count_inversions_recursive(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= count_inversions_recursive(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= _count_cross_inversions(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE__: List[Any]= inversion_p + inversions_q + cross_inversions
return c, num_inversions
def A__ ( snake_case_ : Dict , snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: List[Any]= []
SCREAMING_SNAKE_CASE__: Dict= 0
while i < len(snake_case_ ) and j < len(snake_case_ ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P).
            # Each such pair is an inversion; the claim follows from the
            # fact that P is sorted.
num_inversion += len(snake_case_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(snake_case_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def A__ ( ):
SCREAMING_SNAKE_CASE__: List[Any]= [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
SCREAMING_SNAKE_CASE__: Optional[int]= count_inversions_bf(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= count_inversions_recursive(snake_case_ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , snake_case_ )
    # testing an array with zero inversions (the sorted arr_a)
arr_a.sort()
SCREAMING_SNAKE_CASE__: Any= count_inversions_bf(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= count_inversions_recursive(snake_case_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , snake_case_ )
# an empty list should also have zero inversions
SCREAMING_SNAKE_CASE__: Optional[Any]= []
SCREAMING_SNAKE_CASE__: List[str]= count_inversions_bf(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= count_inversions_recursive(snake_case_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , snake_case_ )
if __name__ == "__main__":
main()
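# Illustrative check (added): an independent O(n^2) recount of the example
# array used in main above; the helper name is new to this sketch.
def _count_inversions(arr: list) -> int:
    return sum(
        1
        for i in range(len(arr))
        for j in range(i + 1, len(arr))
        if arr[i] > arr[j]
    )

assert _count_inversions([10, 2, 1, 5, 5, 2, 11]) == 8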
| 64 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( snake_case_ : int , snake_case_ : Optional[Any]=1.0 , snake_case_ : Dict=None , snake_case_ : Dict=None ):
if rng is None:
SCREAMING_SNAKE_CASE__: Tuple= global_rng
SCREAMING_SNAKE_CASE__: List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= parent
SCREAMING_SNAKE_CASE__: Dict= batch_size
SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
SCREAMING_SNAKE_CASE__: Dict= max_seq_length
SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__: Dict= feature_size
SCREAMING_SNAKE_CASE__: str= padding_value
SCREAMING_SNAKE_CASE__: Dict= sampling_rate
SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
SCREAMING_SNAKE_CASE__: str= do_normalize
def UpperCamelCase_ ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__: int= [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = ASTFeatureExtractor
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
def UpperCamelCase_ ( self ) -> Any:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test non-batched input
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
import torch
SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: str= torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 1 |
from datetime import datetime as dt
import os
from github import Github
lowercase_ : int = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def A__ ( ):
SCREAMING_SNAKE_CASE__: int= Github(os.environ['''GITHUB_TOKEN'''] )
SCREAMING_SNAKE_CASE__: Union[str, Any]= g.get_repo('''huggingface/transformers''' )
SCREAMING_SNAKE_CASE__: int= repo.get_issues(state='''open''' )
for issue in open_issues:
SCREAMING_SNAKE_CASE__: Optional[int]= sorted([comment for comment in issue.get_comments()] , key=lambda snake_case_ : i.created_at , reverse=snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[Any]= comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 64 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
def A__ ( snake_case_ : list ):
if len(snake_case_ ) <= 1:
return [tuple(snake_case_ )]
SCREAMING_SNAKE_CASE__: str= []
def generate(snake_case_ : int , snake_case_ : list ):
SCREAMING_SNAKE_CASE__: int= [0] * n
res.append(tuple(snake_case_ ) )
SCREAMING_SNAKE_CASE__: Tuple= 0
while i < n:
if c[i] < i:
if i % 2 == 0:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= arr[i], arr[0]
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= arr[i], arr[c[i]]
res.append(tuple(snake_case_ ) )
c[i] += 1
SCREAMING_SNAKE_CASE__: Dict= 0
else:
SCREAMING_SNAKE_CASE__: Any= 0
i += 1
generate(len(snake_case_ ) , snake_case_ )
return res
if __name__ == "__main__":
lowercase_ : Any = input('Enter numbers separated by a comma:\n').strip()
lowercase_ : Optional[int] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
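# Illustrative sketch (added): a minimal recursive form of Heap's algorithm,
# cross-checked against itertools; all names below are new to this example.
from itertools import permutations as _ref_perms

def _heaps(seq: list) -> list:
    out = []

    def _gen(k: int) -> None:
        if k == 1:
            out.append(tuple(seq))
            return
        for i in range(k - 1):
            _gen(k - 1)
            if k % 2 == 0:
                seq[i], seq[k - 1] = seq[k - 1], seq[i]
            else:
                seq[0], seq[k - 1] = seq[k - 1], seq[0]
        _gen(k - 1)

    _gen(len(seq))
    return out

assert sorted(_heaps([1, 2, 3])) == sorted(_ref_perms((1, 2, 3)))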
| 64 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
SCREAMING_SNAKE_CASE__: Any= parser.parse_args()
return args
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
if not len(snake_case_ ) == rows * cols:
        raise ValueError('''The specified number of rows and columns does not match the number of images.''' )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size
SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size
for i, img in enumerate(snake_case_ ):
grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
return grid
def A__ ( snake_case_ : Tuple , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=42 , ):
SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(pipeline.device ).manual_seed(snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
SCREAMING_SNAKE_CASE__: str= int(math.sqrt(snake_case_ ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 1 |
lowercase_ : str = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 64 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
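# Illustrative note (added): for keywords ["he", "she"] the search above maps
# each keyword to the start indices of its occurrences, computed as
# i - len(keyword) + 1 where i is the index of the keyword's last character;
# in "ushers", "she" therefore starts at 1 and "he" at 2.
_text, _keyword = "ushers", "she"
_i = _text.index(_keyword) + len(_keyword) - 1  # index of the last character
assert _i - len(_keyword) + 1 == 1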
| 64 | 1 |
from __future__ import annotations
from math import pi
def A__ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
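# Worked example (added): a 0.1 H inductor driven at 50 Hz has reactance
# X_L = 2 * pi * f * L = 2 * pi * 50 * 0.1 ~ 31.42 ohm, which is what the
# reactance branch above returns for (inductance=0.1, frequency=50, reactance=0).
from math import pi as _pi

assert round(2 * _pi * 50 * 0.1, 2) == 31.42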
| 64 | import numpy as np
def A__ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
SCREAMING_SNAKE_CASE__: List[Any]= int(np.ceil((x_end - xa) / h ) )
SCREAMING_SNAKE_CASE__: Any= np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__: int= ya
SCREAMING_SNAKE_CASE__: Tuple= xa
for k in range(snake_case_ ):
SCREAMING_SNAKE_CASE__: Any= f(snake_case_ , y[k] )
SCREAMING_SNAKE_CASE__: Optional[int]= f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__: Tuple= f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__: List[str]= f(x + h , y[k] + h * ka )
SCREAMING_SNAKE_CASE__: Tuple= y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
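# Illustrative check (added): a self-contained restatement of the RK4 stepper
# above, integrating dy/dx = y from y(0) = 1 to x = 1; with h = 0.01 the
# result matches e to well under 1e-6. Names are new to this sketch.
import numpy as _np

def _rk4(f, ya, xa, h, x_end):
    n = int(_np.ceil((x_end - xa) / h))
    y = _np.zeros(n + 1)
    y[0], x = ya, xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (h / 6) * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y

assert abs(_rk4(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1] - _np.e) < 1e-6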
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
self.assertIsInstance(lowerCAmelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
self.assertIsInstance(lowerCAmelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 64 | 1 |
lowercase_ : Optional[Any] = tuple[float, float, float]
lowercase_ : Dict = tuple[float, float, float]
def A__ ( snake_case_ : Pointad , snake_case_ : Pointad ):
SCREAMING_SNAKE_CASE__: Tuple= end_pointa[0] - end_pointa[0]
SCREAMING_SNAKE_CASE__: Union[str, Any]= end_pointa[1] - end_pointa[1]
SCREAMING_SNAKE_CASE__: int= end_pointa[2] - end_pointa[2]
return (x, y, z)
def A__ ( snake_case_ : Vectorad , snake_case_ : Vectorad ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= ab[1] * ac[2] - ab[2] * ac[1] # *i
SCREAMING_SNAKE_CASE__: Tuple= (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
SCREAMING_SNAKE_CASE__: Optional[Any]= ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def A__ ( snake_case_ : Vectorad , snake_case_ : int ):
return tuple(round(snake_case_ , snake_case_ ) for x in vector ) == (0, 0, 0)
def A__ ( snake_case_ : Pointad , snake_case_ : Pointad , snake_case_ : Pointad , snake_case_ : int = 10 ):
SCREAMING_SNAKE_CASE__: int= create_vector(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE__: Union[str, Any]= create_vector(snake_case_ , snake_case_ )
return is_zero_vector(get_ad_vectors_cross(snake_case_ , snake_case_ ) , snake_case_ )
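# Illustrative check (added): (0, 0, 0), (1, 1, 1) and (2, 2, 2) lie on one
# line, so AB x AC is the zero vector and the test above returns True.
# The cross-product helper below is new to this sketch.
def _cross(u, v):
    return (
        u[1] * v[2] - u[2] * v[1],
        u[2] * v[0] - u[0] * v[2],
        u[0] * v[1] - u[1] * v[0],
    )

assert _cross((1, 1, 1), (2, 2, 2)) == (0, 0, 0)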
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase_ : str = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase_ : int = SegmentTree(test_array, min)
lowercase_ : Optional[int] = SegmentTree(test_array, max)
lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def A__ ( ):
for i in range(len(snake_case_ ) ):
for j in range(snake_case_ , len(snake_case_ ) ):
SCREAMING_SNAKE_CASE__: Any= reduce(snake_case_ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE__: Optional[Any]= reduce(snake_case_ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE__: int= reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case_ , snake_case_ )
assert max_range == max_segment_tree.query(snake_case_ , snake_case_ )
assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ )
test_all_segments()
for index, value in test_updates.items():
lowercase_ : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ : str = logging.get_logger(__name__)
lowercase_ : Any = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "table-transformer"
__a = ["past_key_values"]
__a = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
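# Usage sketch (illustrative; it relies only on the classes defined above):
#   config = TableTransformerConfig()
#   config.hidden_size            # -> 256, aliased to d_model via attribute_map
#   config.num_attention_heads    # -> 8, aliased to encoder_attention_heads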
| 64 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        # clip_sample / set_alpha_to_one were unrecoverable placeholders in the
        # source; False/False is the usual choice for DDIM in these tests.
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device))
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
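# End-to-end usage sketch mirroring the slow test above (weights and URLs are
# the ones used in the test; `strength` controls how far the init image is
# re-noised before denoising):
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )
#   result = pipe(prompt, image, control_image=canny_map, strength=0.6).images[0]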
| 64 | 1 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by walking attribute names, re-joining pieces
        # that were split on "_"; the loop exits via `break`
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
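    # Example invocation (script name and paths are illustrative):
    #   python convert_lora_to_diffusers.py \
    #       --base_model_path runwayml/stable-diffusion-v1-5 \
    #       --checkpoint_path ./my_lora.safetensors \
    #       --dump_path ./merged_pipeline --alpha 0.75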
| 64 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps,
        )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
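# Usage sketch of the functional Flax scheduler API above (shapes and the
# denoiser call are placeholders; state is threaded explicitly):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample.shape)
#   for t in state.timesteps:
#       noise_pred = model(sample, t)  # hypothetical denoiser
#       sample, state = scheduler.step(state, noise_pred, t, sample, return_dict=False)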
| 64 | 1 |
def is_even(number: int) -> bool:
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
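    # A few direct sanity checks in addition to doctest (nothing beyond the
    # function defined above is assumed):
    assert is_even(0) and is_even(10)
    assert not is_even(7) and not is_even(-3)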
| 64 | def catalan_numbers(upper_limit: int) -> list[int]:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
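
# Example values for reference: catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]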
| 64 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
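# Usage sketch (the test file path is illustrative; any
# tests/models/*/test_modeling_*.py path follows the same pattern):
#   mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))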
| 64 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 64 | 1 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
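
# With the defaults this counts the n-digit positive integers that are also
# nth powers (Project Euler problem 63); the published answer is 49.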
if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 64 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
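    # Example invocation (script name and paths are illustrative):
    #   python convert_opt_checkpoint.py \
    #       --fairseq_path ./restored.pt --pytorch_dump_folder_path ./opt-converted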
| 64 | 1 |
import os
import pytest
from attr import dataclass
DEFAULT_REGION = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
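
# Usage sketch (hypothetical test class; the fixture attaches the environment
# object to the class as `self.env`):
#   @pytest.mark.usefixtures("sm_env")
#   class SingleNodeTest(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_job_name(self):
#           assert self.env.base_job_name.startswith("pytorch")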
| 64 | def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
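
# Example (approximate handbook values for water near room temperature; the
# figures are illustrative):
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)  # ~1467.7 m/s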
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
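
# Usage sketch (the checkpoint name is illustrative; any MobileViT checkpoint
# that ships a preprocessor config should behave the same way):
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 256, 256) with the defaults above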
| 64 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
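
# Note (behavioral sketch, assuming the standard _LazyModule semantics): at
# runtime the submodules listed in _import_structure are only imported on
# first attribute access, e.g. accessing `Blip2Config` triggers the import of
# `configuration_blip_2`; under TYPE_CHECKING the real imports above apply.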
| 64 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def UpperCamelCase_ ( self ) -> Any:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE__: Any= '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
SCREAMING_SNAKE_CASE__: List[Any]= '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
SCREAMING_SNAKE_CASE__: Dict= '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE__: Union[str, Any]= '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowerCAmelCase )
BertModel.from_pretrained(lowerCAmelCase )
BertTokenizer.from_pretrained(lowerCAmelCase )
pipeline(task='''fill-mask''' , model=lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE__: List[str]= [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE__: Any= self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE__: Union[str, Any]= '''1'''
SCREAMING_SNAKE_CASE__: str= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ) -> str:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE__: Optional[Any]= '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
SCREAMING_SNAKE_CASE__: Any= '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
SCREAMING_SNAKE_CASE__: Optional[int]= '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE__: Optional[Any]= '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowerCAmelCase )
BertModel.from_pretrained(lowerCAmelCase )
BertTokenizer.from_pretrained(lowerCAmelCase )
pipeline(task='''fill-mask''' , model=lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE__: Optional[int]= [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_env()
SCREAMING_SNAKE_CASE__: Tuple= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE__: Union[str, Any]= '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
SCREAMING_SNAKE_CASE__: Any= '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
SCREAMING_SNAKE_CASE__: int= '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE__: List[Any]= [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.get_env()
SCREAMING_SNAKE_CASE__: Optional[int]= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE__: Tuple= [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE__: List[str]= '''1'''
SCREAMING_SNAKE_CASE__: Optional[int]= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= '''
from transformers import pipeline
'''
SCREAMING_SNAKE_CASE__: Dict= '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
SCREAMING_SNAKE_CASE__: Optional[int]= '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
SCREAMING_SNAKE_CASE__: Any= self.get_env()
SCREAMING_SNAKE_CASE__: Optional[Any]= '''1'''
SCREAMING_SNAKE_CASE__: Any= [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
SCREAMING_SNAKE_CASE__: Tuple= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: str= '''
from transformers import AutoModel
'''
SCREAMING_SNAKE_CASE__: Dict= '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE__: str= [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE__: int= self.get_env()
SCREAMING_SNAKE_CASE__: Union[str, Any]= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE__: Optional[int]= '''1'''
SCREAMING_SNAKE_CASE__: int= subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
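
# The tests above exercise offline mode end to end. For reference, the switch
# is an environment variable rather than an API call:
#   TRANSFORMERS_OFFLINE=1 python run_inference.py   # script name illustrative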
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Propagate the concrete label names from the dataset into the template's schema.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
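# Illustrative usage sketch (not part of the original module; it assumes the class is
# imported through `datasets`, since the relative imports above need package context):
#
#   from datasets import ClassLabel, Features, Value
#   from datasets.tasks import TextClassification
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   aligned = template.align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["neg", "pos"]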
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort all edges by weight, then greedily keep the
        # cheapest edge that does not close a cycle.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
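if __name__ == "__main__":
    # Small usage sketch (added for illustration): build a triangle graph and
    # extract its minimum spanning tree; Kruskal keeps the two cheapest edges
    # and drops the weight-10 one.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    assert mst.connections == {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}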
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
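# Usage sketch (illustrative, not part of the original file; runnable only with
# transformers installed, since the relative imports above require package context):
#
#   from transformers import ViTMAEConfig, ViTMAEModel
#
#   config = ViTMAEConfig(mask_ratio=0.5)  # override any of the defaults above
#   model = ViTMAEModel(config)            # randomly initialised ViT-MAE encoder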
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
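# Usage sketch (illustrative; these arguments are normally consumed by
# transformers' PyTorchBenchmark utility):
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   args = PyTorchBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = PyTorchBenchmark(args).run()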
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from ..utils import DummyObject, requires_backends


# Dummy placeholders raised in place of the real ONNX pipelines when the required
# backends are missing (class names mirror diffusers' dummy ONNX objects).
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
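# Behaviour sketch (illustrative): when the "onnx" extra is missing, any use of
# these placeholders fails loudly at the call site instead of at some later point:
#
#   from diffusers import OnnxStableDiffusionPipeline
#
#   OnnxStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   # -> ImportError naming the missing torch / transformers / onnx backends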
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
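# Illustrative encryption/decryption sketch (not part of the original module).
# With h = e_2 = (g^d)^-1 mod p as generated above, ElGamal works as follows,
# where m is the message with 0 <= m < p and k is a fresh ephemeral key:
#
#   def encrypt(m, public_key):
#       _size, g, h, p = public_key
#       k = random.randrange(3, p)
#       return pow(g, k, p), (m * pow(h, k, p)) % p
#
#   def decrypt(ciphertext, private_key, p):
#       c_1, c_2 = ciphertext
#       _size, d = private_key
#       # c2 * c1^d = m * (g^d)^-k * g^(k*d) = m  (mod p)
#       return (c_2 * pow(c_1, d, p)) % p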
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
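# Usage sketch (illustrative): the lazy module defers the heavy imports until a
# symbol is first accessed, e.g.
#
#   from transformers.models.glpn import GLPNConfig, GLPNForDepthEstimation
#
#   model = GLPNForDepthEstimation(GLPNConfig())  # only now is modeling_glpn loaded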
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( snake_case_ : Tuple , snake_case_ : str , snake_case_ : Tuple ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE__: List[str]= LxmertConfig.from_json_file(snake_case_ )
print(F'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE__: List[str]= LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
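# Illustrative invocation (the paths are hypothetical; the flags are defined above):
#
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./lxmert/model.ckpt \
#         --config_file ./lxmert/bert_config.json \
#         --pytorch_dump_path ./lxmert/pytorch_model.bin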
| 64 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float list with the given 2-D shape, scaled by `scale`."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
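# A minimal sanity check of the helper above (values come from the module-level
# rng, so the exact numbers vary):
#
#     batch = floats_list((2, 3), scale=2.0)
#     assert len(batch) == 2 and len(batch[0]) == 3
#     assert all(0.0 <= v < 2.0 for row in batch for v in row)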
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='''np''').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='''np''').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='''np''').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='''np''').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='''np''').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='''np''').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='''pt''').input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 64 | 1 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    'n_samples': 64,
    'horizon': 32,
    'num_inference_steps': 20,
    'n_guide_steps': 2,  # can set to 0 for faster sampling, does not use value network
    'scale_grad_by_std': True,
    'scale': 0.1,
    'eta': 0.0,
    't_grad_cutoff': 2,
    'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
                f''' {total_score}'''
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(f'''Total reward: {total_reward}''')
| 64 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_speech_to_text'] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_to_text'] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How much images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
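# A quick sketch of the helper above with solid-color placeholders (PIL's Image
# is already imported in this script):
#
#     tiles = [Image.new('RGB', (64, 64), color=(i * 60, 0, 0)) for i in range(4)]
#     sheet = image_grid(tiles, rows=2, cols=2)
#     assert sheet.size == (128, 128)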
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ).T
    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 64 | from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        '''value''': character,
                        '''next_states''': [],
                        '''fail_state''': 0,
                        '''output''': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['''fail_state''']
                while (
                    self.find_next_state(state, self.adlist[child]['''value'''] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['''fail_state''']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]['''value'''] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['''output''']
                    + self.adlist[self.adlist[child]['''fail_state''']]['''output''']
                )
    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state, string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['''fail_state''']
            next_state = self.find_next_state(current_state, string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
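# A short usage sketch of the automaton above:
#
#     auto = Automaton(["what", "hat", "ver", "er"])
#     print(auto.search_in("whathatever"))
#     # expected: {'what': [0], 'hat': [1, 4], 'ver': [8], 'er': [9]}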
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
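# A small usage sketch of the backtracking colorer above: a triangle plus a
# pendant vertex, given as an adjacency matrix, is 3-colorable.
#
#     graph = [
#         [0, 1, 1, 0],
#         [1, 0, 1, 0],
#         [1, 1, 0, 1],
#         [0, 0, 1, 0],
#     ]
#     assert color(graph, 3) == [0, 1, 2, 0]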
| 64 | import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    # classic fourth-order Runge-Kutta integration of y' = f(x, y)
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
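# A worked sketch: integrate y' = y with y(0) = 1 up to x = 1; the fourth-order
# scheme above should land very close to e ≈ 2.71828.
#
#     y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     assert abs(y[-1] - 2.71828) < 1e-3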
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 64 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation('''swish''')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_silu(self):
        act = get_activation('''silu''')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_mish(self):
        act = get_activation('''mish''')
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
    def test_gelu(self):
        act = get_activation('''gelu''')
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 64 | 1 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()
    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        # check every [i, j] range of the min/max/sum trees against brute force
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None
    def init_retrieval(self, distributed_port):
        logger.info('''initializing retrieval''')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 64 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        def init_weights(m):
            if isinstance(m , torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512))
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512))
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image).max() < 9e-2
| 64 | 1 |