code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_A : Optional[int] = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def __snake_case(lowerCAmelCase_=True, datasets=None) -> list:
    """Build ``parameterized.named_parameters`` entries for the HF-GCP datasets.

    Args:
        lowerCAmelCase_: when True, emit one entry per (dataset, config) pair;
            when False, one entry per unique dataset name.  (The original body
            tested an undefined name ``with_config`` instead of this parameter.)
        datasets: optional iterable of ``{"dataset", "config_name"}`` dicts;
            defaults to the module-level ``DATASETS_ON_HF_GCP`` list.

    Returns:
        A list of dicts, each containing at least ``testcase_name`` and
        ``dataset`` keys, suitable for ``absl`` parameterized tests.
    """
    if datasets is None:
        datasets = DATASETS_ON_HF_GCP
    if lowerCAmelCase_:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in datasets
        ]
    # Without configs, de-duplicate dataset names (set order is unspecified).
    return [
        {"testcase_name": dataset, "dataset": dataset}
        for dataset in {d["dataset"] for d in datasets}
    ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__SCREAMING_SNAKE_CASE ) )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """Parameterized integration test: each (dataset, config) pair mirrored on
    the HF GCP bucket must expose a downloadable dataset-info file.

    NOTE(review): ``__SCREAMING_SNAKE_CASE`` and
    ``list_datasets_on_hf_gcp_parameters`` are not defined anywhere in this
    file — the obfuscation lost the decorator argument and the base class
    (presumably ``TestCase``); confirm against version control.
    """

    # Filled in per test case by parameterized.named_parameters.
    lowerCamelCase__ : Any = None
    lowerCamelCase__ : Optional[int] = None

    def lowercase_ ( self , A_ , A_ ):
        """Download the dataset-info file for this dataset/config from HF GCP.

        NOTE(review): the duplicated parameter name ``A_`` is a SyntaxError as
        written; originally these were the dataset name and the config name —
        confirm against version control.
        """
        with TemporaryDirectory() as tmp_dir:
            SCREAMING_SNAKE_CASE__ = dataset_module_factory(A_ , cache_dir=A_ )
            SCREAMING_SNAKE_CASE__ = import_main_class(dataset_module.module_path , dataset=A_ )
            SCREAMING_SNAKE_CASE__ = builder_cls(
                cache_dir=A_ , config_name=A_ , hash=dataset_module.hash , )
            # URL of the cached dataset info on the HF Google Cloud Storage bucket.
            SCREAMING_SNAKE_CASE__ = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=A_ ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            SCREAMING_SNAKE_CASE__ = cached_path(A_ , cache_dir=A_ )
            self.assertTrue(os.path.exists(A_ ) )
@pytest.mark.integration
def __snake_case(tmp_path_factory):
    """Integration test: build wikipedia/20220301.frr from the preprocessed
    copy on HF GCP instead of running the apache-beam pipeline.

    Args:
        tmp_path_factory: pytest fixture.  The obfuscated parameter name broke
            pytest's by-name fixture injection, so the original name is
            restored here.
    """
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name='20220301.frr', hash=dataset_module.hash,
    )
    # Use the HF cloud storage, not the original download_and_prepare that uses
    # apache-beam.  NOTE(review): the attribute name on the left-hand side was
    # lost in obfuscation; `_download_and_prepare` matches upstream — confirm.
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def __snake_case(tmp_path):
    """Integration test: stream wikipedia/20220301.frr from HF GCP and check
    the streaming dataset types and a first sample.

    Args:
        tmp_path: pytest fixture providing a temporary cache directory.  The
            obfuscated parameter name broke pytest's fixture injection.
    """
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name='20220301.frr', hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    # The original asserted `isinstance(x, x)` on the fixture, which always
    # raises; the imports at the top of the file show the intended checks.
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'], IterableDataset)
    assert next(iter(ds['train']))
| 100 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Weight-name suffixes that are replicated across TP ranks and must therefore
# be averaged when merging shards.  Both lists were bound to the same
# obfuscated name `__a` (the second shadowed the first) while the converter
# below references these proper names.
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    'word_embeddings_layernorm.weight',
    'word_embeddings_layernorm.bias',
    'input_layernorm.weight',
    'input_layernorm.bias',
    'post_attention_layernorm.weight',
    'post_attention_layernorm.bias',
    'self_attention.dense.bias',
    'mlp.dense_4h_to_h.bias',
    'ln_f.weight',
    'ln_f.bias',
]
# Substrings identifying RowParallelLinear weights in Megatron-DeepSpeed,
# which are concatenated along dim 1 (others along dim 0).
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    'mlp.dense_4h_to_h.weight',
    'self_attention.dense.weight',
]
# Backward-compatible alias: `__a` previously ended up bound to the second list.
__a = WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN
def lowerCamelCase__(key, file):
    """Map a Megatron-DeepSpeed state-dict key to the transformers BLOOM name.

    The original signature duplicated the parameter name ``_lowercase`` (a
    SyntaxError) and the body referenced an undefined ``key``.

    Args:
        key: parameter name inside one Megatron checkpoint shard.
        file: shard file name (e.g. ``layer_04-model_00-model_states.pt``);
            the transformer-block index is parsed from it.

    Returns:
        The renamed key in the transformers ``BloomModel`` layout.
    """
    # Non-block tensors (embeddings / final layer norm) map directly.
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: Megatron layer files are offset by 3 relative
    # to the transformers `h.<n>` numbering.
    layer_number = int(re.match(r'.*layer_(\d*).*', file)[1])
    layer_number -= 3
    return f'h.{layer_number}.' + key
def lowerCamelCase__(_lowercase):
    """Return the per-element size in bytes of a torch dtype.

    The original body compared an undefined name ``dtype`` against
    ``torch.bool`` and interpolated it into the error message; it now uses the
    actual parameter.

    Args:
        _lowercase: a ``torch.dtype`` (e.g. ``torch.float16``).

    Returns:
        Bytes per element; ``1 / 8`` for ``torch.bool`` (packed accounting
        used by the sharding index).

    Raises:
        ValueError: if no bit width can be parsed from the dtype's repr.
    """
    if _lowercase == torch.bool:
        return 1 / 8
    # Dtype reprs end with their bit width, e.g. "torch.float16" -> 16.
    bit_search = re.search(r'[^\d](\d+)$', str(_lowercase))
    if bit_search is None:
        raise ValueError(f'`dtype` is not a valid dtype: {_lowercase}.')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
    """Convert a Megatron-DeepSpeed BLOOM checkpoint into a transformers one.

    NOTE(review): all five parameters were obfuscated to the same name
    ``_lowercase`` (duplicate arguments are a SyntaxError) and most locals were
    collapsed onto ``UpperCAmelCase_``, so the body references names that are
    never bound (``bloom_config_file``, ``shard_model``, ``temp``, ``tensors``,
    ``total_size``, ...).  The parameters were presumably (checkpoint path,
    config file, dump folder, shard flag, pretraining TP degree) — recover the
    real names from version control before running.
    """
    if bloom_config_file == "":
        UpperCAmelCase_ : Tuple = BloomConfig()
    else:
        UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
    if shard_model:
        # Sharded output: merge TP shards file-by-file and write an index json.
        UpperCAmelCase_ : Any = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
        UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
        UpperCAmelCase_ : List[str] = 0
        UpperCAmelCase_ : Any = None
        UpperCAmelCase_ : Optional[int] = BloomConfig()
        for j, file in enumerate(_lowercase ):
            print('''Processing file: {}'''.format(_lowercase ) )
            UpperCAmelCase_ : Optional[Any] = None
            for i in range(_lowercase ):
                # load all TP files
                UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
                UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                UpperCAmelCase_ : Dict = list(temp.keys() )
                for key in keys:
                    UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
                if tensors is None:
                    UpperCAmelCase_ : Union[str, Any] = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
            torch.save(
                _lowercase , os.path.join(
                    _lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
            # Accumulate per-key byte sizes for the sharding index metadata.
            for key in tensors.keys():
                UpperCAmelCase_ : Union[str, Any] = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
        UpperCAmelCase_ : List[Any] = BloomConfig()
        UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        UpperCAmelCase_ : List[str] = total_size
        with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
            UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
            f.write(_lowercase )
    else:
        # Unsharded output: accumulate everything into a single BloomModel.
        UpperCAmelCase_ : Any = BloomModel(_lowercase )
        UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
        UpperCAmelCase_ : Any = None
        for i, file in enumerate(_lowercase ):
            UpperCAmelCase_ : Optional[Any] = None
            for i in range(_lowercase ):
                # load all TP files
                UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
                UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                UpperCAmelCase_ : str = list(temp.keys() )
                for key in keys:
                    UpperCAmelCase_ : Dict = temp.pop(_lowercase )
                if tensors is None:
                    UpperCAmelCase_ : int = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
            UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
            assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
            # Keys missing from every file are genuinely missing from the checkpoint.
            if missing_keys is None:
                UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
            else:
                UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f'''The keys {missing_keys} are missing'''
        # Save pytorch-model
        os.makedirs(_lowercase , exist_ok=_lowercase )
        UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
        if config.torch_dtype is not None:
            UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
        torch.save(model.state_dict() , _lowercase )
        print(f'''Save configuration file to {pytorch_config_dump_path}''' )
        with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase ( _UpperCAmelCase ):
    """Dataset reader that materializes (or streams) a ``datasets`` dataset
    from a pyspark DataFrame via the packaged ``Spark`` builder.

    NOTE(review): every ``__init__`` parameter was obfuscated to the same name
    ``SCREAMING_SNAKE_CASE_`` (duplicate arguments are a SyntaxError) and the
    body forwards an undefined ``lowercase__``; the real names (df, split,
    features, streaming, cache_dir, keep_in_memory, working_dir,
    load_from_cache_file, file_format) must be recovered from version control.
    """

    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "arrow" , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]:
        """Store reader options and construct the underlying Spark builder."""
        # Forward the generic reader options to the abstract base class.
        super().__init__(
            split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , )
        lowerCamelCase_ = load_from_cache_file
        lowerCamelCase_ = file_format
        # The Spark builder performs the actual DataFrame -> dataset conversion.
        lowerCamelCase_ = Spark(
            df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , )

    def UpperCamelCase( self ) -> Optional[int]:
        """Build and return the dataset (streaming or fully prepared)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # Respect the cache unless a forced re-download was requested.
        lowerCamelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=lowercase__ , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 704 |
'''simple docstring'''
from ....utils import logging
A_ = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Thin config wrapper: adopts every attribute of a backbone ``config`` and
    adds multimodal-specific fields.

    NOTE(review): the base class ``UpperCAmelCase__`` is not defined in this
    file — confirm against version control.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048) -> Dict:
        """
        Args:
            config: backbone model config whose attributes are adopted wholesale.
            num_labels: optional classification-head size; only set when truthy.
            modal_hidden_size: hidden size of the non-text modality encoder.
        """
        # The original signature repeated one parameter name (a SyntaxError)
        # and bound both values to a throwaway local instead of instance
        # attributes; adopt the wrapped config's attribute dict directly.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 384 | 0 |
"""simple docstring"""
def snake_case_(A_: int) -> bool:
    """Return True if ``A_`` is a bouncy number: its digits are neither
    monotonically increasing nor monotonically decreasing.

    Args:
        A_: the integer to test.

    Raises:
        ValueError: if ``A_`` is not an int.
    """
    # The original checked `isinstance(A_, A_)`, which always raises TypeError;
    # the error message shows the intended integer type check.
    if not isinstance(A_, int):
        raise ValueError('''check_bouncy() accepts only integer arguments''')
    str_n = str(A_)
    sorted_str_n = ''.join(sorted(str_n))
    # Ascending-sorted digits == original => increasing; reversed => decreasing.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case_(A_: float = 99):
    """Return the least number at which the proportion of bouncy numbers in
    1..n first reaches ``A_`` percent (Project Euler 112).

    Args:
        A_: target percentage, exclusive bounds (0, 100).

    Raises:
        ValueError: if ``A_`` is outside (0, 100).
    """

    def _is_bouncy(num: int) -> bool:
        # Bouncy: digits neither sorted ascending nor sorted descending.
        # Inlined because the original called an undefined `check_bouncy`.
        digits = str(num)
        ordered = ''.join(sorted(digits))
        return ordered != digits and ordered[::-1] != digits

    if not 0 < A_ < 1_00:
        raise ValueError('''solution() only accepts values from 0 to 100''')
    bouncy_num = 0
    num = 1
    while True:
        if _is_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= A_:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 83 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _lowerCAmelCase ( __magic_name__ ):
    """Agent tool that transcribes audio to text with openai/whisper-base.

    NOTE(review): the three pipeline-stage methods below were all obfuscated to
    the same name ``__lowerCAmelCase``, so only the last definition survives on
    the class; presumably they were encode / forward / decode — confirm against
    version control.  The base class ``__magic_name__`` is also undefined here
    (likely ``PipelineTool``, imported above).
    """

    SCREAMING_SNAKE_CASE_ : Tuple ="openai/whisper-base"  # checkpoint backing the tool
    SCREAMING_SNAKE_CASE_ : List[str] =(
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    SCREAMING_SNAKE_CASE_ : int ="transcriber"  # tool name exposed to the agent
    SCREAMING_SNAKE_CASE_ : Tuple =WhisperProcessor
    SCREAMING_SNAKE_CASE_ : List[Any] =WhisperForConditionalGeneration
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =["audio"]  # input modalities
    SCREAMING_SNAKE_CASE_ : Any =["text"]  # output modalities

    def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
        """Preprocess: turn raw audio into Whisper input features."""
        return self.pre_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).input_features

    def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
        """Forward: run generation on the prepared input features."""
        return self.model.generate(inputs=SCREAMING_SNAKE_CASE__ )

    def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
        """Postprocess: decode generated token ids into the transcript string."""
        return self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )[0]
| 282 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCAmelCase(IterableDataset):
    """Iterable dataset that yields 0, 1, 2, ... and stops either randomly
    (with probability ``p_stop`` after each item) or at ``max_length``,
    whichever comes first.

    Fixes over the obfuscated original: the duplicated ``__init__`` parameter
    names were a SyntaxError, both values were bound to a throwaway local
    instead of instance attributes, and the base class name was undefined
    (``IterableDataset`` is imported at the top of this file and is what the
    sharding tests below exercise).
    """

    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # Flip a biased coin after each yield to decide whether to stop.
            stop = random.random() < self.p_stop
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True ):
_SCREAMING_SNAKE_CASE = [
BatchSamplerShard(UpperCamelCase , 2 , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
for i in range(2 )
]
_SCREAMING_SNAKE_CASE = [list(UpperCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(UpperCamelCase ) for shard in batch_sampler_shards] , [len(UpperCamelCase ) for e in expected] )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowercase ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
def lowercase ( self ):
# Check the shards when the dataset is a round multiple of batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
def lowercase ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
def lowercase ( self ):
# Check the shards when the dataset is a round multiple of batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is very small.
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_SCREAMING_SNAKE_CASE = [BatchSamplerShard(UpperCamelCase , 2 , UpperCamelCase , even_batches=UpperCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=2 , UpperCamelCase=False ):
random.seed(UpperCamelCase )
_SCREAMING_SNAKE_CASE = list(UpperCamelCase )
_SCREAMING_SNAKE_CASE = [
IterableDatasetShard(
UpperCamelCase , batch_size=UpperCamelCase , drop_last=UpperCamelCase , num_processes=UpperCamelCase , process_index=UpperCamelCase , split_batches=UpperCamelCase , )
for i in range(UpperCamelCase )
]
_SCREAMING_SNAKE_CASE = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(UpperCamelCase )
iterable_dataset_lists.append(list(UpperCamelCase ) )
_SCREAMING_SNAKE_CASE = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_SCREAMING_SNAKE_CASE = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
self.assertTrue(len(UpperCamelCase ) % shard_batch_size == 0 )
_SCREAMING_SNAKE_CASE = []
for idx in range(0 , len(UpperCamelCase ) , UpperCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(UpperCamelCase ) < len(UpperCamelCase ):
reference += reference
self.assertListEqual(UpperCamelCase , reference[: len(UpperCamelCase )] )
def lowercase(self):
    """Exercise IterableDatasetShard over every drop_last/split_batches combination,
    including the edge case of a dataset smaller than one batch."""
    seed = 42
    dataset = RandomIterableDataset()
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    # Edge case with a very small dataset
    small_dataset = RandomIterableDataset(max_length=2)
    self.check_iterable_dataset_shards(small_dataset, seed, batch_size=4, drop_last=False, split_batches=False)
    self.check_iterable_dataset_shards(small_dataset, seed, batch_size=4, drop_last=True, split_batches=False)
    self.check_iterable_dataset_shards(small_dataset, seed, batch_size=4, drop_last=False, split_batches=True)
    self.check_iterable_dataset_shards(small_dataset, seed, batch_size=4, drop_last=True, split_batches=True)
def lowercase(self):
    """SkipBatchSampler(sampler, 2) must drop the first two batches of the wrapped sampler."""
    batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
    new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
    self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
def lowercase(self):
    """SkipDataLoader must skip the first `skip_batches` batches of the underlying data."""
    dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
    self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def lowercase(self):
    """skip_first_batches wraps a regular DataLoader and drops its leading batches."""
    dataloader = DataLoader(list(range(16)), batch_size=4)
    new_dataloader = skip_first_batches(dataloader, num_batches=2)
    self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def lowercase(self):
    """`end_of_dataloader` flips to True only on the last batch, and resets on a second epoch."""
    dataloader = DataLoaderShard(list(range(16)), batch_size=4)
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    # Test it also works on the second iteration
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
def lowercase(self):
    """DataLoaderDispatcher also reports `end_of_dataloader` correctly across epochs."""
    Accelerator()
    dataloader = DataLoaderDispatcher(range(16), batch_size=4)
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    # Test it also works on the second iteration
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
_snake_case : List[Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config-file URL.
# NOTE(review): this rebinds `_snake_case` and shadows the logger above — the two
# constants were presumably distinct names before an automated rename; confirm.
_snake_case : int = {
    """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
    """YituTech/conv-bert-medium-small""": (
        """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
    ),
    """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCAmelCase(__UpperCAmelCase):
    """Configuration for ConvBERT models.

    Mirrors BERT's hyper-parameters plus the ConvBERT-specific
    `embedding_size`, `head_ratio`, `conv_kernel_size` and `num_groups`.
    The mangled original had every `__init__` parameter named identically
    (a SyntaxError) and never assigned the values to `self`; both are fixed here.
    """

    # NOTE(review): likely originally named `model_type` (PretrainedConfig convention) — confirm.
    a = """convbert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the base class; everything else is stored on self.
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase(__UpperCAmelCase):
    """ONNX export configuration for ConvBERT: declares the dynamic axes of each model input."""

    @property
    def lowercase(self):
        # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
'''simple docstring'''
def _a ( lowerCamelCase_ ):
snake_case : Optional[int] =0
for ch in input_str:
snake_case : List[Any] =ord(A_ )
snake_case : Union[str, Any] =pow(2 , A_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
# When executed as a script, run the module's doctests.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the fixture parameters and builds the kwargs dict for DPTImageProcessor tests.

    Renamed from the mangled `_SCREAMING_SNAKE_CASE`: the test class below
    instantiates `DPTImageProcessingTester(self)`, and the old name was shadowed
    by the second class carrying the same mangled name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # NOTE: mutable defaults are shared across instances; acceptable here since tests never mutate them.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE(ImageProcessingSavingTestMixin, unittest.TestCase):
    """End-to-end checks of DPTImageProcessor on PIL, NumPy and PyTorch inputs.

    Reconstructed from a mangled original in which the mixin base name was lost,
    all methods shared one name (so only the last survived), and locals were never
    bound. Method names follow the attributes the class reads on itself
    (`image_processing_class`, `image_processor_tester`, `image_processor_dict`).
    """

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every knob the tester configures.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        # Keyword overrides passed to `from_dict` take precedence over the dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger.
UpperCamelCase = logging.get_logger(__name__)
# Checkpoint name -> hosted config-file URL.
# NOTE(review): this rebinds the same (mangled) name as the logger above, shadowing
# it — the two constants were presumably distinct names before an automated rename.
UpperCamelCase = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class _a(lowercase_):
    """Vision-tower configuration for InstructBLIP (`model_type = "instructblip_vision_model"`).

    The mangled original had every `__init__` parameter named identically (a
    SyntaxError), never bound the values to `self`, and dropped the tuple-unpack
    of `get_config_dict`; all fixed here. The class attribute is restored to
    `model_type`, which this class's own `from_pretrained` logic reads via
    `hasattr(cls, "model_type")`.
    """

    model_type = """instructblip_vision_model"""

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-1_0,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def __lowercase(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a full InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class _a(lowercase_):
    """Q-Former configuration for InstructBLIP (`model_type = "instructblip_qformer"`).

    Same mangling repairs as the vision config: de-duplicated `__init__`
    parameters, restored `self.` assignments and the `get_config_dict`
    tuple-unpack, and restored the `model_type` class attribute read by
    `hasattr(cls, "model_type")` below.
    """

    model_type = """instructblip_qformer"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def __lowercase(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this Q-Former config, unwrapping it from a full InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class _a(lowercase_):
    """Composite InstructBLIP configuration tying together the vision tower,
    the Q-Former and the language model.

    Mangling repairs: the two class attributes had collapsed to one name
    (shadowing `model_type`, which `to_dict` reads via
    `self.__class__.model_type`); `__init__` parameters were de-duplicated;
    the second duplicate `__lowercase` method is restored to `to_dict`
    (its body serializes via the sub-configs' own `to_dict`).
    """

    model_type = """instructblip"""
    # NOTE(review): presumably `is_composition` in the original — confirm.
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision tower's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def __lowercase(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each sub-config and recording this class's `model_type`."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a(metaclass=lowercase_):
    """Import-guard stand-in: any use raises an error asking for torch/transformers/onnx.

    Fixes the mangled `*X, **X` duplicate-argument SyntaxError, drops bogus return
    annotations referencing unimported names, and restores distinct classmethod
    names (the duplicates shadowed each other; `from_config`/`from_pretrained`
    follow the standard dummy-object layout).
    """

    UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _a(metaclass=lowercase_):
    """Import-guard stand-in (see the first dummy class for the shared repairs)."""

    UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _a(metaclass=lowercase_):
    """Import-guard stand-in: raises a helpful error when torch/transformers/onnx are absent."""

    UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _a(metaclass=lowercase_):
    """Import-guard stand-in: raises a helpful error when torch/transformers/onnx are absent."""

    UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _a(metaclass=lowercase_):
    """Import-guard stand-in: raises a helpful error when torch/transformers/onnx are absent."""

    UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _a(metaclass=lowercase_):
    """Import-guard stand-in: raises a helpful error when torch/transformers/onnx are absent."""

    UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
UpperCamelCase_ : Union[str, Any] = True
from torch.cuda.amp import autocast
UpperCamelCase_ : List[Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """CLI arguments describing which model to pre-train and how to anneal the Gumbel temperature.

    The mangled original gave every field the same name (collapsing the dataclass
    to a single field) and hid most defaults; field names are restored from the
    attribute reads in this file (`model_args.verbose_logging`,
    `model_args.max_gumbel_temperature`, ...) and the class is renamed per the
    visible `HfArgumentParser((ModelArguments, ...))` call in `main`.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999_995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    """Send root logging to stdout and pick this file's verbosity from the CLI args.

    DEBUG when `--verbose_logging`, INFO on the main process, WARNING otherwise.
    Renamed from the mangled `__a` per the visible `configure_logger(...)` call in
    `main`; the original also had both parameters sharing one name (a SyntaxError).
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """CLI arguments describing the dataset used for pre-training.

    Field names are restored from the attribute reads in `main`
    (`data_args.dataset_name`, `data_args.speech_file_column`, ...); the class is
    renamed per the visible `HfArgumentParser((..., DataTrainingArguments, ...))`
    call. Defaults hidden by the mangling are hedged where not recoverable from
    the metadata text.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Pad a batch of raw speech features and sample the masked time indices for pre-training.

    Renamed from the mangled `_a` per the visible
    `DataCollatorForWavaVecaPretraining(model=..., feature_extractor=...)` call in
    `main`. NOTE(review): the attention-mask write below had lost its left-hand
    side in the mangled original (`... = 1`); the reconstruction follows the
    surrounding comments but should be verified against the upstream wav2vec2
    pre-training script.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer that anneals the model's Gumbel-softmax temperature after every training step.

    Renamed from the mangled `_a` per the visible `WavaVecaPreTrainer(...)` call
    in `main`; the base is the `Trainer` imported at the top of the file (the
    mangled base name was undefined). `training_step` restores the Trainer-API
    method name so the override actually runs.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs) -> torch.Tensor:
        """Run one forward/backward pass, then decay the Gumbel temperature toward its minimum."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main():
    """Parse CLI arguments, prepare the audio dataset, and launch wav2vec2 pre-training.

    Renamed from the mangled `__a` per the visible `main()` call in the
    `__main__` guard; every local binding had been collapsed to a placeholder
    name and is restored from the reads that follow it.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"""{data_args.train_split_name}""",
            cache_dir=model_args.cache_dir,
        )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
# Module-level logger for this pipeline module.
# NOTE(review): the very next statement rebinds the same name `UpperCamelCase_`
# to the example docstring, clobbering this logger — upstream these are two
# distinct constants (`logger` / `EXAMPLE_DOC_STRING`); split the names apart.
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCamelCase_ : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def __a(h: int, w: int, scale_factor: int = 8) -> tuple:
    """Round an image size to dimensions compatible with the movq VAE.

    Each dimension is ceil-divided by ``scale_factor ** 2`` (the latent grid
    size) and then multiplied back by ``scale_factor``.

    Args:
        h: requested image height in pixels.
        w: requested image width in pixels.
        scale_factor: movq downscale factor (default 8).

    Returns:
        ``(new_h, new_w)`` — the rounded height and width.
    """
    # Fix: the original signature declared all three parameters with the same
    # name (`_UpperCamelCase`), which is a SyntaxError in Python.
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class _a ( __lowerCAmelCase ):
    """Kandinsky 2.1 text-to-image pipeline: MultilingualCLIP text encoder and
    tokenizer, UNet denoiser, diffusion scheduler, and a movq VAE decoder.

    NOTE(review): obfuscation damage, flagged once here rather than per line:
    - the base-class name `__lowerCAmelCase` is undefined in this module
      (upstream the base is `DiffusionPipeline`);
    - `__init__`, the first two `_lowercase` helpers and `__call__` repeat the
      parameter name `_SCREAMING_SNAKE_CASE`, which is a SyntaxError as written;
    - every helper method is named `_lowercase`, so each definition shadows the
      previous one, while the bodies reference the upstream names
      (`self.prepare_latents`, `self._encode_prompt`, `latents`, `batch_size`,
      `gpu_id`, ...) that are never bound;
    - `@replace_example_docstring(_SCREAMING_SNAKE_CASE)` below uses a name that
      is undefined at class-body level, so class creation would raise NameError.
    """

    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> None:
        # Upstream parameters: (text_encoder, tokenizer, unet, scheduler, movq).
        super().__init__()
        self.register_modules(
            text_encoder=_SCREAMING_SNAKE_CASE ,tokenizer=_SCREAMING_SNAKE_CASE ,unet=_SCREAMING_SNAKE_CASE ,scheduler=_SCREAMING_SNAKE_CASE ,movq=_SCREAMING_SNAKE_CASE ,)
        # Spatial downscale factor implied by the movq architecture
        # (2 ** number of downsampling stages).
        # NOTE(review): upstream this is stored as `self.movq_scale_factor`,
        # which __call__ reads below; here the target was mangled into a bare
        # local, so the attribute is never actually set.
        _snake_case = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
        # Create (or validate) the initial latent tensor and scale it by the
        # scheduler's initial noise sigma. Upstream parameters:
        # (shape, dtype, device, generator, latents, scheduler).
        if latents is None:
            _snake_case = randn_tensor(_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            _snake_case = latents.to(_SCREAMING_SNAKE_CASE )
        _snake_case = latents * scheduler.init_noise_sigma
        return latents

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,) -> Union[str, Any]:
        # Encode `prompt` (and, when classifier-free guidance is enabled, the
        # `negative_prompt`) with the MultilingualCLIP text encoder.
        # Upstream parameters: (prompt, device, num_images_per_prompt,
        # do_classifier_free_guidance, negative_prompt=None).
        _snake_case = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else 1
        # get prompt text embeddings
        _snake_case = self.tokenizer(
            _SCREAMING_SNAKE_CASE ,padding="max_length" ,truncation=_SCREAMING_SNAKE_CASE ,max_length=77 ,return_attention_mask=_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ,)
        _snake_case = text_inputs.input_ids
        _snake_case = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding="longest" ,return_tensors="pt" ).input_ids
        # Warn when the prompt exceeds the 77-token CLIP context window.
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            _snake_case = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        _snake_case = text_input_ids.to(_SCREAMING_SNAKE_CASE )
        _snake_case = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
        _snake_case , _snake_case = self.text_encoder(
            input_ids=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
        # Repeat the embeddings once per requested image.
        _snake_case = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
        _snake_case = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
        _snake_case = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
        if do_classifier_free_guidance:
            # NOTE(review): upstream this line is the annotation-only statement
            # `uncond_tokens: List[str]`; the literal 42 is mangling residue.
            _snake_case = 42
            if negative_prompt is None:
                _snake_case = [""] * batch_size
            elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="""
                    f""" {type(_SCREAMING_SNAKE_CASE )}.""" )
            elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
                _snake_case = [negative_prompt]
            elif batch_size != len(_SCREAMING_SNAKE_CASE ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                _snake_case = negative_prompt
            _snake_case = self.tokenizer(
                _SCREAMING_SNAKE_CASE ,padding="max_length" ,max_length=77 ,truncation=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ,)
            _snake_case = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
            _snake_case = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
            _snake_case , _snake_case = self.text_encoder(
                input_ids=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _snake_case = negative_prompt_embeds.shape[1]
            _snake_case = negative_prompt_embeds.repeat(1 ,_SCREAMING_SNAKE_CASE )
            _snake_case = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,_SCREAMING_SNAKE_CASE )
            _snake_case = uncond_text_encoder_hidden_states.shape[1]
            _snake_case = uncond_text_encoder_hidden_states.repeat(1 ,_SCREAMING_SNAKE_CASE ,1 )
            _snake_case = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt ,_SCREAMING_SNAKE_CASE ,-1 )
            _snake_case = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _snake_case = torch.cat([negative_prompt_embeds, prompt_embeds] )
            _snake_case = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            _snake_case = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
        # Sequentially offload unet / text_encoder / movq to CPU, moving each
        # submodule to the GPU only while it executes (lowest memory, slowest).
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        _snake_case = torch.device(f"""cuda:{gpu_id}""" )
        _snake_case = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                # NOTE(review): upstream this is
                # `cpu_offload(cpu_offloaded_model, device)`.
                cpu_offload(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
        # Model-level CPU offload via accelerate hooks — faster than the
        # sequential variant above while still reducing peak GPU memory.
        if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        _snake_case = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" ,silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        _snake_case = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _snake_case , _snake_case = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,prev_module_hook=_SCREAMING_SNAKE_CASE )
        # NOTE(review): `self.safety_checker` is never registered by __init__ in
        # this pipeline — presumably copied from Stable Diffusion; confirm.
        if self.safety_checker is not None:
            _snake_case , _snake_case = cpu_offload_with_hook(self.safety_checker ,_SCREAMING_SNAKE_CASE ,prev_module_hook=_SCREAMING_SNAKE_CASE )
        # We'll offload the last model manually.
        _snake_case = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _lowercase ( self ) -> Optional[int]:
        # Device the pipeline actually executes on, accounting for accelerate
        # offload hooks placed on the unet's submodules.
        if not hasattr(self.unet ,"_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_SCREAMING_SNAKE_CASE ,"_hf_hook" )
                and hasattr(module._hf_hook ,"execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(_SCREAMING_SNAKE_CASE )
    def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 512 ,_SCREAMING_SNAKE_CASE = 512 ,_SCREAMING_SNAKE_CASE = 100 ,_SCREAMING_SNAKE_CASE = 4.0 ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = "pil" ,_SCREAMING_SNAKE_CASE = True ,) -> Union[str, Any]:
        # Full generation: encode prompt, run the denoising loop, decode the
        # latents with the movq VAE. Upstream parameters: (prompt, image_embeds,
        # negative_image_embeds, negative_prompt=None, height=512, width=512,
        # num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1,
        # generator=None, latents=None, output_type="pil", return_dict=True).
        if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            _snake_case = 1
        elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            _snake_case = len(_SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}""" )
        _snake_case = self._execution_device
        _snake_case = batch_size * num_images_per_prompt
        # Classifier-free guidance is active whenever guidance_scale > 1.
        _snake_case = guidance_scale > 1.0
        _snake_case , _snake_case , _snake_case = self._encode_prompt(
            _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        # Accept lists of image embeddings produced by the prior pipeline.
        if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            _snake_case = torch.cat(_SCREAMING_SNAKE_CASE ,dim=0 )
        if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            _snake_case = torch.cat(_SCREAMING_SNAKE_CASE ,dim=0 )
        if do_classifier_free_guidance:
            _snake_case = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
            _snake_case = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE ,dim=0 )
            _snake_case = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
                dtype=prompt_embeds.dtype ,device=_SCREAMING_SNAKE_CASE )
        self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE )
        _snake_case = self.scheduler.timesteps
        _snake_case = self.unet.config.in_channels
        # Round the requested size to movq-compatible latent dimensions.
        # NOTE(review): `get_new_h_w` is undefined here — the helper above was
        # renamed to `__a`; confirm and restore a stable name.
        _snake_case , _snake_case = get_new_h_w(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.movq_scale_factor )
        # create initial latent
        _snake_case = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.scheduler ,)
        for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
            # expand the latents if we are doing classifier free guidance
            _snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            _snake_case = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            _snake_case = self.unet(
                sample=_SCREAMING_SNAKE_CASE ,timestep=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,added_cond_kwargs=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
            if do_classifier_free_guidance:
                # The unet stacks predicted noise and (learned) variance on the
                # channel dim; guidance mixes only the noise halves.
                _snake_case , _snake_case = noise_pred.split(latents.shape[1] ,dim=1 )
                _snake_case , _snake_case = noise_pred.chunk(2 )
                _snake_case , _snake_case = variance_pred.chunk(2 )
                _snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                _snake_case = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
            if not (
                hasattr(self.scheduler.config ,"variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the variance channels; drop them.
                _snake_case , _snake_case = noise_pred.split(latents.shape[1] ,dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            _snake_case = self.scheduler.step(
                _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,).prev_sample
        # post-processing
        _snake_case = self.movq.decode(_SCREAMING_SNAKE_CASE ,force_not_quantize=_SCREAMING_SNAKE_CASE )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map the decoder output from [-1, 1] into [0, 1] image range.
            _snake_case = image * 0.5 + 0.5
            _snake_case = image.clamp(0 ,1 )
            _snake_case = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
        if output_type == "pil":
            _snake_case = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 185 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Container bundling the three dataset splits returned by the loader below.
lowerCamelCase__ = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
# NOTE(review): this rebinds `lowerCamelCase__`, clobbering the namedtuple
# created above — upstream these are two constants (`_Datasets` and
# `DEFAULT_SOURCE_URL`); the names need to be split apart.
lowerCamelCase__ = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def __lowerCAmelCase (__lowerCAmelCase ):
    """Read one unsigned 32-bit big-endian integer from a binary stream.

    Args:
        __lowerCAmelCase: file-like object opened in binary mode (e.g. a
            ``gzip.GzipFile``); exactly 4 bytes are consumed.

    Returns:
        The decoded value as a numpy ``uint32`` scalar.
    """
    # MNIST/IDX file headers are stored in network (big-endian) byte order.
    # Fixes: the original referenced the nonexistent dtype `numpy.uintaa` and
    # the undefined names `bytestream` / `lowerCamelCase_`.
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(">" )
    return numpy.frombuffer(__lowerCAmelCase.read(4 ) , dtype=dt )[0]
@deprecated(lowerCamelCase_ , "Please use tf.data to implement this functionality." )
def __lowerCAmelCase (__lowerCAmelCase ):
    """Extract MNIST images from a gzipped IDX3 file into a uint8 array of
    shape [num_images, rows, cols, 1].

    NOTE(review): non-functional as written — the decorator argument
    `lowerCamelCase_` is undefined at module scope (upstream passes None); the
    body reads `f`, `magic`, `rows`, `cols`, `num_images` and `data`, which are
    never bound (the parameter is `__lowerCAmelCase` and every assignment goes
    to `_UpperCAmelCase`); the helper `_readaa` does not exist (the big-endian
    reader above was renamed); and `numpy.uinta` is not a real dtype
    (upstream: numpy.uint8).
    """
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream:
        # First header word must be the IDX3 magic number 2051.
        _UpperCAmelCase : Any = _readaa(lowerCamelCase_ )
        if magic != 2_051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        # Remaining header words: image count, row count, column count.
        _UpperCAmelCase : str = _readaa(lowerCamelCase_ )
        _UpperCAmelCase : str = _readaa(lowerCamelCase_ )
        _UpperCAmelCase : Union[str, Any] = _readaa(lowerCamelCase_ )
        # The raw pixel payload follows the header.
        _UpperCAmelCase : str = bytestream.read(rows * cols * num_images )
        _UpperCAmelCase : List[Any] = numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta )
        _UpperCAmelCase : Tuple = data.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 1 )
        return data
@deprecated(None , "Please use tf.one_hot on tensors." )
def __lowerCAmelCase (labels_dense , num_classes ):
    """Convert a 1-D vector of class indices into a dense one-hot matrix.

    Args:
        labels_dense: 1-D integer array of class labels in ``[0, num_classes)``.
        num_classes: number of classes (width of the one-hot matrix).

    Returns:
        Float array of shape ``[len(labels_dense), num_classes]`` with a 1 at
        ``[i, labels_dense[i]]`` and 0 elsewhere.
    """
    # Fixes relative to the broken original: the duplicate parameter names
    # (a SyntaxError), the undefined decorator argument `lowerCamelCase_`
    # (upstream passes None), and the missing flat-index write — the original
    # assigned 1 to a throwaway local, so the matrix stayed all zeros.
    num_labels = labels_dense.shape[0]
    # Offset of the start of row i inside the flattened matrix.
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    # Set exactly one entry per row: column labels_dense[i] of row i.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(lowerCamelCase_ , "Please use tf.data to implement this functionality." )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=10 ):
    """Extract MNIST labels from a gzipped IDX1 file into a 1-D uint8 vector,
    optionally one-hot encoded.

    NOTE(review): non-functional as written — the signature repeats the same
    parameter name three times (a SyntaxError; upstream: ``f``,
    ``one_hot=False``, ``num_classes=10``); the decorator argument
    `lowerCamelCase_` is undefined; the body reads `f`, `magic`, `one_hot` and
    `labels`, which are never bound; the helpers `_readaa` /
    `_dense_to_one_hot` do not exist under those names; and `numpy.uinta` is
    not a real dtype (upstream: numpy.uint8).
    """
    print("Extracting" , f.name )
    with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream:
        # First header word must be the IDX1 magic number 2049.
        _UpperCAmelCase : int = _readaa(lowerCamelCase_ )
        if magic != 2_049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
        # Second header word: number of label bytes that follow.
        _UpperCAmelCase : Tuple = _readaa(lowerCamelCase_ )
        _UpperCAmelCase : Optional[int] = bytestream.read(lowerCamelCase_ )
        _UpperCAmelCase : Any = numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta )
        if one_hot:
            return _dense_to_one_hot(lowerCamelCase_ , lowerCamelCase_ )
        return labels
class lowerCAmelCase__ :
    """In-memory MNIST split: holds image/label arrays and serves shuffled
    mini-batches (upstream name: ``_DataSet``).

    NOTE(review): obfuscation damage, flagged once here rather than per line:
    - ``__init__`` and the batch method repeat the same parameter name, a
      SyntaxError as written;
    - every method is named ``lowerCAmelCase__`` (the class's own name), so
      each later definition shadows the earlier ``@property`` accessors and
      only the final (batch) method survives in the class namespace;
    - the bodies read the upstream parameter/local names (``seed``, ``seed2``,
      ``dtype``, ``fake_data``, ``images``, ``labels``, ``start``, ``end``,
      ``perm0``/``perm`` ...) that the mangled assignments never bind;
    - ``dtypes.floataa`` / ``dtypes.uinta`` / ``numpy.floataa`` are not real
      names (upstream: float32 / uint8);
    - the decorator argument ``UpperCAmelCase_`` is undefined (upstream: None).
    """

    @deprecated(
        UpperCAmelCase_ , "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models." , )
    def __init__( self , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Tuple=dtypes.floataa , lowerCamelCase__ : int=True , lowerCamelCase__ : Union[str, Any]=None , ) -> None:
        """Build a split from image/label arrays (upstream parameters:
        images, labels, fake_data=False, one_hot=False, dtype=float32,
        reshape=True, seed=None)."""
        # Upstream unpacks two seeds: seed1, seed2 = random_seed.get_seed(seed).
        _UpperCAmelCase : Dict = random_seed.get_seed(UpperCAmelCase_ )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        _UpperCAmelCase : Optional[Any] = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
        if fake_data:
            _UpperCAmelCase : Any = 1_00_00
            _UpperCAmelCase : int = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            _UpperCAmelCase : int = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                _UpperCAmelCase : Any = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                _UpperCAmelCase : Union[str, Any] = images.astype(numpy.floataa )
                _UpperCAmelCase : Dict = numpy.multiply(UpperCAmelCase_ , 1.0 / 2_55.0 )
        # Upstream: self._images / self._labels plus the epoch/batch cursors.
        _UpperCAmelCase : Union[str, Any] = images
        _UpperCAmelCase : List[str] = labels
        _UpperCAmelCase : int = 0
        _UpperCAmelCase : Any = 0

    @property
    def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
        """Image array of this split (upstream property: ``images``)."""
        return self._images

    @property
    def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
        """Label array of this split (upstream property: ``labels``)."""
        return self._labels

    @property
    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
        """Number of examples (upstream property: ``num_examples``)."""
        return self._num_examples

    @property
    def lowerCAmelCase__ ( self : Dict ) ->Tuple:
        """Completed epochs served so far (upstream: ``epochs_completed``)."""
        return self._epochs_completed

    def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Dict=True ) ->List[Any]:
        """Return the next batch of (images, labels) from this split
        (upstream: ``next_batch(batch_size, fake_data=False, shuffle=True)``),
        reshuffling and wrapping around at epoch boundaries."""
        if fake_data:
            # Fabricate a flat 784-pixel image and a constant label.
            _UpperCAmelCase : List[Any] = [1] * 7_84
            _UpperCAmelCase : Optional[int] = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(UpperCAmelCase_ )],
                [fake_label for _ in range(UpperCAmelCase_ )],
            )
        _UpperCAmelCase : Tuple = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            _UpperCAmelCase : List[str] = numpy.arange(self._num_examples )
            numpy.random.shuffle(UpperCAmelCase_ )
            _UpperCAmelCase : Optional[int] = self.images[perma]
            _UpperCAmelCase : List[Any] = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            _UpperCAmelCase : List[Any] = self._num_examples - start
            _UpperCAmelCase : int = self._images[start : self._num_examples]
            _UpperCAmelCase : int = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                _UpperCAmelCase : Any = numpy.arange(self._num_examples )
                numpy.random.shuffle(UpperCAmelCase_ )
                _UpperCAmelCase : int = self.images[perm]
                _UpperCAmelCase : str = self.labels[perm]
            # Start next epoch
            _UpperCAmelCase : Optional[int] = 0
            _UpperCAmelCase : List[str] = batch_size - rest_num_examples
            _UpperCAmelCase : List[Any] = self._index_in_epoch
            _UpperCAmelCase : Any = self._images[start:end]
            _UpperCAmelCase : Optional[int] = self._labels[start:end]
            # Stitch the tail of the old epoch onto the head of the new one.
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            _UpperCAmelCase : List[Any] = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , "Please write your own downloading logic." )
def __lowerCAmelCase (filename , work_directory , source_url ):
    """Download *filename* from *source_url* into *work_directory* if absent.

    Args:
        filename: base name of the file to fetch.
        work_directory: local directory to place the file in (created if
            missing).
        source_url: URL of the remote copy.

    Returns:
        Local path to the (possibly pre-existing) file.
    """
    # Fixes relative to the broken original: the signature repeated one
    # parameter name three times (a SyntaxError) and the body referenced the
    # undefined name `lowerCamelCase_`; the decorator argument was likewise
    # undefined (upstream passes None).
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print("Successfully downloaded" , filename , size , "bytes." )
    return filepath
@deprecated(
    lowerCamelCase_ , "Please use alternatives such as:" " tensorflow_datasets.load(\'mnist\')" )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=dtypes.floataa , __lowerCAmelCase=True , __lowerCAmelCase=5_000 , __lowerCAmelCase=None , __lowerCAmelCase=DEFAULT_SOURCE_URL , ):
    """Download (if needed) and load the MNIST train/validation/test splits
    (upstream: ``read_data_sets``).

    NOTE(review): non-functional as written — the signature repeats the same
    parameter name eight times (a SyntaxError; upstream: train_dir,
    fake_data=False, one_hot=False, dtype=float32, reshape=True,
    validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL); the
    decorator argument `lowerCamelCase_` and the default `DEFAULT_SOURCE_URL`
    are undefined at module scope; and the body calls `_maybe_download`,
    `_extract_images`, `_extract_labels`, `_DataSet` and `_Datasets`, none of
    which exist under those names after the renaming above.
    """
    if fake_data:

        def fake():
            # Build an empty fake split (upstream forwards the fake_data /
            # one_hot / dtype / seed arguments).
            return _DataSet(
                [] , [] , fake_data=lowerCamelCase_ , one_hot=lowerCamelCase_ , dtype=lowerCamelCase_ , seed=lowerCamelCase_ )

        _UpperCAmelCase : Any = fake()
        _UpperCAmelCase : Optional[int] = fake()
        _UpperCAmelCase : int = fake()
        return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
    if not source_url: # empty string check
        _UpperCAmelCase : Any = DEFAULT_SOURCE_URL
    # Canonical MNIST archive names.
    _UpperCAmelCase : int = 'train-images-idx3-ubyte.gz'
    _UpperCAmelCase : int = 'train-labels-idx1-ubyte.gz'
    _UpperCAmelCase : Union[str, Any] = 't10k-images-idx3-ubyte.gz'
    _UpperCAmelCase : Union[str, Any] = 't10k-labels-idx1-ubyte.gz'
    # Fetch and parse the four archives (train/test x images/labels).
    _UpperCAmelCase : Any = _maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + train_images_file )
    with gfile.Open(lowerCamelCase_ , "rb" ) as f:
        _UpperCAmelCase : Any = _extract_images(lowerCamelCase_ )
    _UpperCAmelCase : List[str] = _maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + train_labels_file )
    with gfile.Open(lowerCamelCase_ , "rb" ) as f:
        _UpperCAmelCase : int = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
    _UpperCAmelCase : List[Any] = _maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + test_images_file )
    with gfile.Open(lowerCamelCase_ , "rb" ) as f:
        _UpperCAmelCase : str = _extract_images(lowerCamelCase_ )
    _UpperCAmelCase : Tuple = _maybe_download(
        lowerCamelCase_ , lowerCamelCase_ , source_url + test_labels_file )
    with gfile.Open(lowerCamelCase_ , "rb" ) as f:
        _UpperCAmelCase : Tuple = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
    # Carve the validation split off the front of the training data.
    if not 0 <= validation_size <= len(lowerCamelCase_ ):
        _UpperCAmelCase : Tuple = (
            'Validation size should be between 0 and '
            F"""{len(lowerCamelCase_ )}. Received: {validation_size}."""
        )
        raise ValueError(lowerCamelCase_ )
    _UpperCAmelCase : Dict = train_images[:validation_size]
    _UpperCAmelCase : str = train_labels[:validation_size]
    _UpperCAmelCase : List[str] = train_images[validation_size:]
    _UpperCAmelCase : Any = train_labels[validation_size:]
    # Options shared by all three _DataSet constructions.
    _UpperCAmelCase : List[str] = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    _UpperCAmelCase : Any = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
    _UpperCAmelCase : Optional[int] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
    _UpperCAmelCase : Union[str, Any] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
    return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
| 706 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
    """Element-wise rectified linear unit.

    Args:
        __lowerCAmelCase: array-like of numbers.

    Returns:
        numpy array with every negative entry replaced by 0.
    """
    return np.maximum(0 , __lowerCAmelCase )


if __name__ == "__main__":
    # Fix: the demo previously called the undefined name `relu`; the function
    # above is (obfuscatedly) named `__lowerCAmelCase`.
    print(np.array(__lowerCAmelCase([-1, 0, 5])))  # --> [0 0 5]
| 40 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class __magic_name__ :
def __init__( self : Union[str, Any] , snake_case__ : int ):
'''simple docstring'''
lowercase :Dict = data
lowercase :List[str] = None
lowercase :Tuple = None
def lowerCamelCase () -> TreeNode:
print('''\n********Press N to stop entering at any point of time********\n''')
lowercase :int = input('''Enter the value of the root node: ''').strip().lower()
lowercase :queue.Queue = queue.Queue()
lowercase :Optional[int] = TreeNode(int(lowerCamelCase__))
q.put(lowerCamelCase__)
while not q.empty():
lowercase :Tuple = q.get()
lowercase :Optional[Any] = F"""Enter the left node of {node_found.data}: """
lowercase :Optional[Any] = input(lowerCamelCase__).strip().lower() or "n"
if check == "n":
return tree_node
lowercase :str = TreeNode(int(lowerCamelCase__))
lowercase :List[str] = left_node
q.put(lowerCamelCase__)
lowercase :List[str] = F"""Enter the right node of {node_found.data}: """
lowercase :Tuple = input(lowerCamelCase__).strip().lower() or "n"
if check == "n":
return tree_node
lowercase :Dict = TreeNode(int(lowerCamelCase__))
lowercase :Dict = right_node
q.put(lowerCamelCase__)
raise
def lowerCamelCase (a_ :List[str]) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
print(node.data , end=''',''')
pre_order(node.left)
pre_order(node.right)
def lowerCamelCase (a_ :Optional[int]) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
in_order(node.left)
print(node.data , end=''',''')
in_order(node.right)
def lowerCamelCase (a_ :Optional[int]) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
post_order(node.left)
post_order(node.right)
print(node.data , end=''',''')
def lowerCamelCase (a_ :List[str]) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
lowercase :queue.Queue = queue.Queue()
q.put(lowerCamelCase__)
while not q.empty():
lowercase :str = q.get()
print(node_dequeued.data , end=''',''')
if node_dequeued.left:
q.put(node_dequeued.left)
if node_dequeued.right:
q.put(node_dequeued.right)
def lowerCamelCase (a_ :Any) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
lowercase :queue.Queue = queue.Queue()
q.put(lowerCamelCase__)
while not q.empty():
lowercase :int = []
while not q.empty():
lowercase :Optional[Any] = q.get()
print(node_dequeued.data , end=''',''')
if node_dequeued.left:
list_.append(node_dequeued.left)
if node_dequeued.right:
list_.append(node_dequeued.right)
print()
for node in list_:
q.put(lowerCamelCase__)
def lowerCamelCase (a_ :int) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
lowercase :list[TreeNode] = []
lowercase :Union[str, Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''')
stack.append(lowerCamelCase__)
lowercase :str = n.left
# end of while means current node doesn't have left child
lowercase :Union[str, Any] = stack.pop()
# start to traverse its right child
lowercase :Tuple = n.right
def lowerCamelCase (a_ :List[str]) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
lowercase :list[TreeNode] = []
lowercase :Optional[Any] = node
while n or stack:
while n:
stack.append(lowerCamelCase__)
lowercase :List[str] = n.left
lowercase :Tuple = stack.pop()
print(n.data , end=''',''')
lowercase :Optional[Any] = n.right
def lowerCamelCase (a_ :Optional[int]) -> None:
if not isinstance(lowerCamelCase__ , lowerCamelCase__) or not node:
return
lowercase :str = [], []
lowercase :Union[str, Any] = node
stacka.append(lowerCamelCase__)
while stacka: # to find the reversed order of post order, store it in stack2
lowercase :Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left)
if n.right:
stacka.append(n.right)
stacka.append(lowerCamelCase__)
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''')
def lowerCamelCase (a_ :Dict = "" , a_ :List[str]=50 , a_ :Tuple="*") -> str:
if not s:
return "\n" + width * char
lowercase :Union[str, Any] = divmod(width - len(lowerCamelCase__) - 2 , 2)
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
UpperCAmelCase = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 677 | '''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __A (__magic_name__ ):
def __get__( self , UpperCamelCase_ , UpperCamelCase_=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
__UpperCAmelCase : List[str] = "__cached_" + self.fget.__name__
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if cached is None:
__UpperCAmelCase : List[str] = self.fget(UpperCamelCase_ )
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return cached
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
if is_torch_fx_proxy(lowerCamelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return _is_numpy(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.device )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
import torch
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ )
else:
return False
return isinstance(lowerCamelCase__ , torch.dtype )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
return isinstance(lowerCamelCase__ , tf.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase__ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(lowerCamelCase__ )
return type(lowerCamelCase__ ) == tf.Tensor
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase__ , jnp.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Dict:
    """Flax-safe JAX-array check: False when flax/jax are not installed."""
    if not is_flax_available():
        return False
    return _is_jax(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
    """Recursively convert tensors/arrays (and containers of them) to plain Python objects.

    Bug fixes: the body read an undefined name ``obj`` (the parameter is used
    instead), and the dict/list branches recursed on the *whole* container
    rather than each value/element, which made any nested input loop forever.
    Recursion is now through this function itself.
    """
    obj = lowerCamelCase__
    if isinstance(obj , (dict, UserDict) ):
        return {k: _lowercase(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [_lowercase(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def _lowercase ( lowerCamelCase__ ) -> str:
    """Convert a tensor / nested container of values to numpy.

    Bug fixes: the body read an undefined name ``obj`` (the parameter is used
    instead), and the dict branch recursed on the *whole* mapping instead of
    each value, which looped forever. Recursion is through this function itself.
    """
    obj = lowerCamelCase__
    if isinstance(obj , (dict, UserDict) ):
        return {k: _lowercase(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class __A (__magic_name__ ):
    # NOTE(review): obfuscated ModelOutput-style container — a dataclass that also
    # behaves like an ordered mapping. Throughout this class, assignments go to a
    # throwaway local `__UpperCAmelCase` while later lines read descriptive names
    # (`class_fields`, `first_field`, `first_field_iterator`, `v`, ...); those
    # reads are currently unresolved and look like obfuscation damage — confirm
    # against the upstream implementation before relying on any of this.
    # NOTE(review): several `*UpperCamelCase_ , **UpperCamelCase_` signatures below
    # declare the same name twice, which is a SyntaxError as written.
    def _snake_case ( self ):
        # Post-init style hook: validate the dataclass fields, then mirror them
        # into the mapping side of the object.
        __UpperCAmelCase : Any = fields(self )
        # Safety and consistency checks
        if not len(UpperCamelCase_ ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
        # First (required) field; the special single-argument handling below only
        # applies when every other field is still None.
        __UpperCAmelCase : Dict = getattr(self , class_fields[0].name )
        __UpperCAmelCase : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(UpperCamelCase_ ):
            if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
                __UpperCAmelCase : str = first_field.items()
                __UpperCAmelCase : Union[str, Any] = True
            else:
                # Probe whether the first field is iterable at all.
                try:
                    __UpperCAmelCase : Optional[int] = iter(UpperCamelCase_ )
                    __UpperCAmelCase : Dict = True
                except TypeError:
                    __UpperCAmelCase : Union[str, Any] = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(UpperCamelCase_ ):
                    if (
                        not isinstance(UpperCamelCase_ , (list, tuple) )
                        or not len(UpperCamelCase_ ) == 2
                        or not isinstance(element[0] , UpperCamelCase_ )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            __UpperCAmelCase : Union[str, Any] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        __UpperCAmelCase : List[str] = element[1]
            elif first_field is not None:
                __UpperCAmelCase : Optional[int] = first_field
        else:
            # Normal path: mirror every non-None dataclass field into the mapping.
            for field in class_fields:
                __UpperCAmelCase : Any = getattr(self , field.name )
                if v is not None:
                    __UpperCAmelCase : Union[str, Any] = v
    # Mutation through the MutableMapping API is deliberately forbidden; the
    # mapping view must stay in sync with the dataclass attributes.
    def __delitem__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , UpperCamelCase_ ):
        # String keys are looked up in the mapping; integer keys index the tuple
        # view. NOTE(review): `inner_dict`/`k` are unresolved here — the dict()
        # result is dropped into a throwaway local (obfuscation damage).
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            __UpperCAmelCase : List[str] = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , UpperCamelCase_ , UpperCamelCase_ ):
        # Keep the mapping in sync when an existing key is reassigned as an attribute.
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(UpperCamelCase_ , UpperCamelCase_ )
        super().__setattr__(UpperCamelCase_ , UpperCamelCase_ )
    def __setitem__( self , UpperCamelCase_ , UpperCamelCase_ ):
        # Will raise a KeyException if needed
        super().__setitem__(UpperCamelCase_ , UpperCamelCase_ )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(UpperCamelCase_ , UpperCamelCase_ )
    def _snake_case ( self ):
        # Tuple view over the stored values, in key order.
        return tuple(self[k] for k in self.keys() )
class __A (__magic_name__ , __magic_name__ ):
    # NOTE(review): both base-class names are obfuscated to the same identifier,
    # which raises "duplicate base class" at class creation — upstream this reads
    # like an `(str, Enum)` mixin. Left untouched pending confirmation.
    @classmethod
    def _snake_case ( cls , UpperCamelCase_ ):
        """Report an invalid enum value with the list of valid ones.

        Bug fixes: ``_value2member_map_`` (the real ``enum.Enum`` attribute) was
        misspelled ``_valueamember_map_``, and the message interpolated an
        undefined ``value`` name instead of the parameter.
        """
        raise ValueError(
            f"""{UpperCamelCase_} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class __A (__magic_name__ ):
    # Enum-style padding strategies for tokenizer padding logic.
    # NOTE(review): all three members share the obfuscated name `snake_case`, so
    # only the last binding survives — upstream these are LONGEST / MAX_LENGTH /
    # DO_NOT_PAD; confirm before use.
    snake_case :Dict = "longest"
    snake_case :Dict = "max_length"
    snake_case :Union[str, Any] = "do_not_pad"
class __A (__magic_name__ ):
    # Enum-style tensor framework tags ("pt"/"tf"/"np"/"jax").
    # NOTE(review): all members share the obfuscated name `snake_case`, so only
    # the last binding survives — upstream these are PYTORCH / TENSORFLOW /
    # NUMPY / JAX; confirm before use.
    snake_case :Union[str, Any] = "pt"
    snake_case :List[str] = "tf"
    snake_case :Any = "np"
    snake_case :Union[str, Any] = "jax"
class __A :
    """Enter/exit a collection of context managers as one unit, backed by ``ExitStack``."""

    def __init__( self , UpperCamelCase_ ):
        # Bug fix: these were previously dropped into throwaway locals, so
        # __enter__/__exit__ crashed on the missing attributes.
        self.context_managers = UpperCamelCase_
        self.stack = ExitStack()

    def __enter__( self ):
        for context_manager in self.context_managers:
            # Bug fix: enter the loop's context manager (the old code passed an
            # undefined name).
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ):
        # Bug fix: *args/**kwargs previously shared one name (a SyntaxError).
        self.stack.__exit__(*args , **kwargs )
def _lowercase ( lowerCamelCase__ ) -> Any:
    """Return True when the model class's call signature defaults ``return_loss`` to True.

    Bug fix: the body referred to an undefined ``model_class`` name; the single
    parameter is now used consistently.
    """
    model_class = lowerCamelCase__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def _lowercase ( lowerCamelCase__ ) -> Any:
    """Return the label-like parameter names of a model class's call signature.

    QuestionAnswering models additionally expose start/end position arguments.

    Bug fix: the body referred to an undefined ``model_class`` name; the single
    parameter is now used consistently.
    """
    model_class = lowerCamelCase__
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def _lowercase ( d , parent_key = "" , delimiter = "." ) -> Optional[Any]:
    """Flatten a nested mapping, joining nested keys with *delimiter*.

    Bug fixes: the previous revision declared all three parameters with the same
    name (a SyntaxError), built every key from the wrong operands, and recursed
    through an undefined name. The inner generator now recurses on itself.
    """

    def _flatten_dict(sub , parent_key="" , delimiter="." ):
        for k, v in sub.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , (dict, UserDict) ):
                # Non-empty mappings are flattened recursively under the joined key.
                yield from _flatten_dict(v , key , delimiter=delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def _lowercase ( working_dir , use_temp_dir = False ) -> Union[str, Any]:
    """Yield *working_dir*, or a fresh temporary directory when *use_temp_dir* is set.

    Bug fix: both parameters were previously declared with the same (obfuscated)
    name — a SyntaxError — while the body read ``working_dir``/``use_temp_dir``.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def _lowercase ( array , axes=None ) -> str:
    """Framework-agnostic ``transpose`` over numpy / torch / tf / jax arrays.

    Bug fix: the two parameters previously shared one (obfuscated) name — a
    SyntaxError — while the body read ``array``/``axes``.
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array )}.""" )
def _lowercase ( array , newshape ) -> Optional[int]:
    """Framework-agnostic ``reshape`` over numpy / torch / tf / jax arrays.

    Bug fix: the two parameters previously shared one (obfuscated) name — a
    SyntaxError — while the body read ``array``.
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array )}.""" )
def _lowercase ( array , axis=None ) -> Optional[int]:
    """Framework-agnostic ``squeeze`` over numpy / torch / tf / jax arrays.

    Bug fix: the two parameters previously shared one (obfuscated) name — a
    SyntaxError — while the body read ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array )}.""" )
def _lowercase ( array , axis ) -> int:
    """Framework-agnostic ``expand_dims`` over numpy / torch / tf / jax arrays.

    Bug fix: the two parameters previously shared one (obfuscated) name — a
    SyntaxError — while the body read ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array )}.""" )
def _lowercase ( lowerCamelCase__ ) -> int:
    """Return the number of elements in a numpy / torch / tf / jax array.

    Bug fixes: the body read an undefined ``array`` name, and the error message
    wrongly said "expand_dims" (copy-paste from the function above).
    """
    array = lowerCamelCase__
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"""Type not supported for tensor_size: {type(array )}.""" )
def _lowercase ( auto_map , repo_id ) -> Any:
    """Prefix every entry of *auto_map* with ``{repo_id}--`` (in place) and return it.

    Entries already containing ``--`` (or None) are left alone.

    Bug fixes: the two parameters previously shared one (obfuscated) name — a
    SyntaxError — and the rewritten values were dropped into throwaway locals
    instead of being written back into the mapping.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
def _lowercase ( lowerCamelCase__ ) -> List[str]:
    """Infer "tf" / "pt" / "flax" from a model class's MRO module names.

    Walks the MRO and classifies the first base whose module prefix (or class
    name) identifies a framework; raises TypeError when nothing matches.

    Bug fix: the error message interpolated an undefined ``model_class`` name.
    """
    model_class = lowerCamelCase__
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # for/else: only reached when no base matched any framework.
        raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 168 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase : Dict = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# Bug fix: the readable names were lost to obfuscation, but the line below (and
# the watermarker class) still read them — rebind them as aliases.
WATERMARK_MESSAGE = _UpperCAmelCase
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase : Any = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
WATERMARK_BITS = _UpperCAmelCase
class lowercase :
    """Embed the fixed watermark bit pattern into a batch of images via ``WatermarkEncoder``."""

    def __init__( self ):
        # Bug fix: the watermark bits and the encoder were dropped into throwaway
        # locals, so `self.watermark` below raised AttributeError.
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )

    def a ( self , snake_case ):
        """Watermark a (batch, C, H, W) tensor in [-1, 1]; returned unchanged if too small.

        Bug fixes: the body read an undefined ``images`` name, and the encoder
        was called on the whole batch instead of each image.
        """
        images = snake_case
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 108 |
def __lowerCamelCase ( UpperCamelCase__ = 50000000 ):
    '''Count numbers below the limit expressible as p**2 + q**3 + r**4 with p, q, r prime.

    Bug fixes: the sieve previously stepped by the *limit* instead of the prime
    (so composites stayed in the "prime" set), and the early ``break``s assumed
    ascending iteration, which a plain set does not guarantee — the prime list
    is now sorted once before enumeration.
    '''
    ret = set()
    prime_square_limit = int((UpperCamelCase__ - 24) ** (1 / 2) )

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24).
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        # strike out every multiple of p (step by p, not by the limit)
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )

    sorted_primes = sorted(primes )
    for prime_a in sorted_primes:
        square = prime_a * prime_a
        for prime_b in sorted_primes:
            cube = prime_b * prime_b * prime_b
            # smallest possible fourth-power term is 2**4 == 16
            if square + cube >= UpperCamelCase__ - 16:
                break
            for prime_c in sorted_primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= UpperCamelCase__:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
    # Bug fix: the solver above is named `__lowerCamelCase`, not `solution`.
    print(F'''{__lowerCamelCase() = }''')
| 108 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the swiftformer package: config symbols always,
# modeling symbols only when torch is available.
lowerCAmelCase = {
    'configuration_swiftformer': [
        'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SwiftFormerConfig',
        'SwiftFormerOnnxConfig',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: this previously rebound the whole dict to a list, clobbering the
    # configuration entries; register the modeling symbols under their own key.
    lowerCAmelCase['modeling_swiftformer'] = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Bug fix: the lazy module must replace this module in sys.modules, and the
    # import structure is the dict built above (the old code passed an undefined
    # `_import_structure` and bound the result to a throwaway name).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], lowerCAmelCase, module_spec=__spec__)
| 43 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    # NOTE(review): obfuscated AlignProcessor test suite. Throughout this class,
    # values that later code reads back via `self.<attr>` (e.g. `self.tmpdirname`,
    # `self.vocab_file`, `self.image_processor_file`) are assigned to a throwaway
    # local `_snake_case` instead — the attribute writes need restoring before
    # these tests can run. Confirm against the upstream test file.
    # setUp-style fixture: write a toy BERT vocab and an image-processor config
    # into a fresh temp dir.
    def lowercase ( self : int ):
        _snake_case = tempfile.mkdtemp()
        _snake_case = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _snake_case = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        # NOTE(review): `_lowerCamelCase` is undefined in this method — upstream
        # this joins with IMAGE_PROCESSOR_NAME and dumps the config dict above.
        _snake_case = os.path.join(self.tmpdirname , _lowerCamelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(_lowerCamelCase , _lowerCamelCase )
    # Factory helpers: slow tokenizer, fast tokenizer, image processor.
    def lowercase ( self : List[Any] , **_lowerCamelCase : Tuple ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
    def lowercase ( self : str , **_lowerCamelCase : Tuple ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
    def lowercase ( self : Dict , **_lowerCamelCase : Tuple ):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
    # tearDown-style cleanup of the temp dir.
    def lowercase ( self : str ):
        shutil.rmtree(self.tmpdirname )
    # Produce a single random PIL image for the processor calls below.
    def lowercase ( self : List[Any] ):
        _snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _snake_case = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # Round-trip: save slow/fast processors and reload them, comparing vocab and
    # image-processor configuration.
    def lowercase ( self : Tuple ):
        _snake_case = self.get_tokenizer()
        _snake_case = self.get_rust_tokenizer()
        _snake_case = self.get_image_processor()
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        _snake_case = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        _snake_case = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
        self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
        self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
    # Reload with extra kwargs (new special tokens, image-processor overrides).
    def lowercase ( self : int ):
        _snake_case = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        _snake_case = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
        _snake_case = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowerCamelCase )
    # The processor's image path should match the bare image processor.
    def lowercase ( self : List[str] ):
        _snake_case = self.get_image_processor()
        _snake_case = self.get_tokenizer()
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        _snake_case = self.prepare_image_inputs()
        _snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' )
        _snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    # The processor's text path should match the bare tokenizer.
    def lowercase ( self : List[str] ):
        _snake_case = self.get_image_processor()
        _snake_case = self.get_tokenizer()
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        _snake_case = '''lower newer'''
        _snake_case = processor(text=_lowerCamelCase )
        _snake_case = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    # Joint text+image call yields all expected keys; calling with nothing raises.
    def lowercase ( self : Any ):
        _snake_case = self.get_image_processor()
        _snake_case = self.get_tokenizer()
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        _snake_case = '''lower newer'''
        _snake_case = self.prepare_image_inputs()
        _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(_lowerCamelCase ):
            processor()
    # batch_decode should delegate to the tokenizer.
    def lowercase ( self : Optional[Any] ):
        _snake_case = self.get_image_processor()
        _snake_case = self.get_tokenizer()
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        _snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _snake_case = processor.batch_decode(_lowerCamelCase )
        _snake_case = tokenizer.batch_decode(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    # model_input_names should cover all keys produced by a joint call.
    def lowercase ( self : Optional[Any] ):
        _snake_case = self.get_image_processor()
        _snake_case = self.get_tokenizer()
        _snake_case = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
        _snake_case = '''lower newer'''
        _snake_case = self.prepare_image_inputs()
        _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 224 | 0 |
'''simple docstring'''
# Package version string (obfuscated name — presumably the original `__version__`; confirm).
A ='0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 358 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _a ( unittest.TestCase ):
    # NOTE(review): obfuscated model-tester helper for FlaxRobertaPreLayerNorm.
    # The __init__ below declares every parameter with the same name `lowercase`,
    # which is a SyntaxError as written, and the config values are dropped into a
    # throwaway local `UpperCAmelCase` while later methods read `self.batch_size`
    # etc. — both need restoring before this class can run.
    def __init__( self : List[Any] , lowercase : Dict , lowercase : List[str]=13 , lowercase : str=7 , lowercase : List[str]=True , lowercase : List[str]=True , lowercase : Optional[Any]=True , lowercase : Optional[Any]=True , lowercase : Any=99 , lowercase : Any=32 , lowercase : Any=5 , lowercase : Tuple=4 , lowercase : List[Any]=37 , lowercase : List[Any]="gelu" , lowercase : int=0.1 , lowercase : Any=0.1 , lowercase : Optional[int]=512 , lowercase : List[str]=16 , lowercase : Union[str, Any]=2 , lowercase : int=0.02 , lowercase : int=4 , ):
        '''simple docstring'''
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_attention_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_choices
    # Build random ids / masks plus a matching config object.
    def A ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = None
        if self.use_attention_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    # Repackage the tuple above into the dict shape the common test mixin expects.
    def A ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    # Decoder variant: adds encoder hidden states / attention mask.
    def A ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = True
        UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _a ( __a , unittest.TestCase ):
    # NOTE(review): both class attributes share the obfuscated name `__a`, so the
    # boolean binding is shadowed by the tuple; the tester instance is also
    # dropped into a throwaway local in the setUp-style method below instead of
    # `self.model_tester` — confirm against the upstream test.
    __a : Any = True
    __a : str = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    # setUp-style: build the shared model tester.
    def A ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase = FlaxRobertaPreLayerNormModelTester(self )
    # Smoke test: every class loads from the hub checkpoint and runs a forward pass.
    @slow
    def A ( self : Optional[int] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            UpperCAmelCase = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowercase )
            UpperCAmelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowercase )
@require_flax
class _a ( unittest.TestCase ):
    # NOTE(review): integration tests comparing hub-checkpoint outputs against
    # hard-coded slices. Results are dropped into a throwaway local
    # `UpperCAmelCase` between steps, and the dtype name `np.floataa` is an
    # obfuscated `np.float32` — confirm against the upstream test.
    # Masked-LM head: check output shape and a 3x3 logits slice.
    @slow
    def A ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowercase )
        UpperCAmelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        UpperCAmelCase = model(lowercase )[0]
        UpperCAmelCase = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , lowercase )
        # compare the actual values for a slice.
        UpperCAmelCase = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
    # Base model: check a 3x3 hidden-state slice.
    @slow
    def A ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=lowercase )
        UpperCAmelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        UpperCAmelCase = model(lowercase )[0]
        # compare the actual values for a slice.
        UpperCAmelCase = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
| 358 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_lowercase : Dict = logging.get_logger(__name__)
class UpperCamelCase__( lowerCAmelCase ):
    """CLIP-based safety checker: blanks out images flagged as NSFW or watermarked."""

    # NOTE(review): both class attributes share one obfuscated name, so the first
    # binding is shadowed; upstream these look like `config_class` and
    # `_no_split_modules` — confirm before renaming.
    __magic_name__ : List[Any] = CLIPConfig
    __magic_name__ : Dict = ["CLIPEncoderLayer"]

    def __init__( self , lowerCAmelCase : CLIPConfig ):
        # Bug fixes: the sub-modules were dropped into throwaway locals (forward()
        # reads them via self.*), the config was read through an undefined
        # `config` name, and the old `-> Optional[...]` annotations referenced
        # typing names this module never imports (a NameError at import time).
        super().__init__(lowerCAmelCase )

        self.vision_model = CLIPVisionModelWithProjection(lowerCAmelCase.vision_config )
        self.p_head = nn.Linear(lowerCAmelCase.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(lowerCAmelCase.vision_config.projection_dim , 1 )

    @torch.no_grad()
    def a__( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        """Return (images, nsfw_flags, watermark_flags); flagged images are zeroed.

        Bug fixes: all four parameters previously shared one name (a SyntaxError),
        flagged images were assigned to a local instead of written back, and the
        warnings went through an undefined `logger` name.
        """
        image_embeds = self.vision_model(clip_input )[0]

        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected ):
            logging.get_logger(__name__ ).warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )

        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected ):
            logging.get_logger(__name__ ).warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )

        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
| 210 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
# NOTE(review): the mapping below rebinds the same obfuscated name `_lowercase`
# as the logger above, so the logger binding is lost — upstream these are
# `logger` and `XLM_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm before use.
# Checkpoint name -> hosted config URL for the XLM model family.
_lowercase : Union[str, Any] = {
    """xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
    """xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
    """xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
    """xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
    """xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
    """xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
    """xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
    """xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
    """XLM model configuration.

    Stores every hyper-parameter of an XLM model as an attribute of the same
    name; ``pad_token_id``/``bos_token_id`` are forwarded to the base config.

    NOTE(review): the original signature reused one parameter name for all 33
    arguments (a SyntaxError), both class attributes were bound to the same
    name (so the second shadowed the first), and the ``self.`` prefix was
    missing on every assignment.  Names below are restored from the attribute
    reads, the defaults, and the ``super().__init__`` call.
    """

    # Read by the base PretrainedConfig machinery.
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1E-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store every hyper-parameter as an attribute and init the base config."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            # Legacy alias for vocab_size kept for old configs.
            self.n_words = kwargs['''n_words''']

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class UpperCamelCase__( lowerCAmelCase ):
    @property
    def a__( self : List[str] )-> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: the same dynamic-axes mapping for every input tensor."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        input_names = ('''input_ids''', '''attention_mask''', '''token_type_ids''')
        return OrderedDict((input_name, dynamic_axis) for input_name in input_names)
| 210 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class snake_case_ :
    """A univariate polynomial; ``coefficients[i]`` is the coefficient of x**i.

    NOTE(review): the original version reused one parameter name per signature
    (a SyntaxError), referenced never-bound names (``polynomial_a``,
    ``__lowerCAmelCase``) inside method bodies, dropped the index on the
    assignments in ``derivative``/``integral``, and defined three methods under
    the same name ``__A`` so only the last survived.  The restored names below
    follow the attribute reads and the evaluate/derivative/integral intent.
    """

    def __init__(self, degree, coefficients):
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.')
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, other):
        # Copy the longer coefficient list, then add the shorter one into it.
        if self.degree > other.degree:
            coefficients = self.coefficients[:]
            for i in range(other.degree + 1):
                coefficients[i] += other.coefficients[i]
            return snake_case_(self.degree, coefficients)
        coefficients = other.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return snake_case_(other.degree, coefficients)

    def __sub__(self, other):
        # a - b == a + b * (-1)
        return self + other * snake_case_(0, [-1])

    def __neg__(self):
        return snake_case_(self.degree, [-c for c in self.coefficients])

    def __mul__(self, other):
        # Classic O(n*m) convolution of the coefficient lists.
        coefficients = [0] * (self.degree + other.degree + 1)
        for i in range(self.degree + 1):
            for j in range(other.degree + 1):
                coefficients[i + j] += self.coefficients[i] * other.coefficients[j]
        return snake_case_(self.degree + other.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at ``substitution``."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution ** i)
        return result

    def __str__(self):
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        """Return the first derivative as a new polynomial (degree - 1)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return snake_case_(self.degree - 1, coefficients)

    def integral(self, constant=0):
        """Return the antiderivative with integration constant ``constant``."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return snake_case_(self.degree + 1, coefficients)

    def __eq__(self, other):
        if not isinstance(other, snake_case_):
            return False
        if self.degree != other.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != other.coefficients[i]:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
| 311 |
def __SCREAMING_SNAKE_CASE ( arr, required_sum ) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum`` (bottom-up DP).

    NOTE(review): the original repeated one parameter name (SyntaxError) and
    dropped the ``subset[i][j] =`` index on every DP update, so the table was
    never filled and the function always returned False.
    """
    arr_len = len(arr)
    # subset[i][j]: can some subset of the first i items sum to j?
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # item too large for this target: carry the answer down
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the item or take it
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


# Readable, star-importable alias (dunder-prefixed names are skipped by `import *`).
is_sum_subset = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 311 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( __a ):
    """Unit tests for UnCLIPScheduler.

    NOTE(review): in the original every method was named ``lowerCAmelCase`` (so
    only the last definition survived) and the class attribute and locals were
    assigned to unrelated throwaway names while the bodies read the real ones
    (``scheduler_classes``, ``config``, ``timesteps`` …), raising NameError.
    Names below are restored from those reads and the base-class
    (SchedulerCommonTest) contract — confirm against the diffusers test suite.
    """

    # Read by the common-test helpers (check_over_configs / check_over_forward).
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides let each test vary one knob."""
        config = {
            '''num_train_timesteps''': 1_000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue  # prev_timestep must strictly precede time_step
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1E-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1E-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# The corpora were pickled by the original (Python 2) codebase under the module
# names `data_utils`/`vocabulary` with classes `Vocab`/`Corpus`; alias them so
# pickle.load can resolve those names against the modern transformers classes.
# NOTE(review): the original lines all rebound one throwaway global, defeating
# the purpose stated in the comment above; restored from the upstream script.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def lowerCAmelCase_(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    """Convert a Transformer-XL TF checkpoint and/or pre-processed corpus to PyTorch.

    Args:
        tf_checkpoint_path: optional TensorFlow checkpoint to convert ("" to skip).
        transfo_xl_config_file: optional config json ("" uses default TransfoXLConfig).
        pytorch_dump_folder_path: output folder for model/vocab/dataset dumps.
        transfo_xl_dataset_file: optional pickled corpus to convert ("" to skip).

    NOTE(review): the original assigned every local to a throwaway name and
    passed the function parameters as every call argument; names restored from
    the upstream conversion script.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, '''rb''') as fp:
            corpus = pickle.load(fp, encoding='''latin1''')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
            f.write(config.to_json_string())


# Upstream-style alias; the __main__ guard calls this name.
convert_transfo_xl_checkpoint_to_pytorch = lowerCAmelCase_
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser and parsed args to one
    # throwaway global while the calls below read `parser`/`args` (NameError);
    # restored the conventional names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--tf_checkpoint_path''',
        default='''''',
        type=str,
        help='''An optional path to a TensorFlow checkpoint path to be converted.''',
    )
    parser.add_argument(
        '''--transfo_xl_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--transfo_xl_dataset_file''',
        default='''''',
        type=str,
        help='''An optional dataset file to be converted in a vocabulary.''',
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-module import structure: submodule name -> public names it exports.
# NOTE(review): the original bound this dict (and the modeling list) to `__A`,
# so the `_import_structure` read below raised NameError; restored per the
# standard transformers __init__ pattern.  Also removed the garbled trailing
# `| 704 |` residue fused onto the last line.
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the modeling classes.
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def _UpperCamelCase(checkpoint_url):
    """Build a DPT-hybrid config (and expected output shape) from the checkpoint URL.

    NOTE(review): the original assigned every value to a throwaway local and
    returned unbound names; attribute names restored from the upstream
    DPT-hybrid conversion script — confirm against transformers' DPTConfig.
    """
    config = DPTConfig(embedding_type="""hybrid""")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # NOTE(review): '"nyu" or "midas" in checkpoint_url' is always truthy
    # (the left operand is a non-empty string); kept as-is to preserve the
    # original behavior for ade checkpoints — verify intent upstream.
    if "nyu" or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset""")), """r"""))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


# Upstream-style alias used by the converter below.
get_dpt_config = _UpperCamelCase
def _UpperCamelCase(state_dict):
    """Drop checkpoint keys that have no counterpart in the HF model (in place).

    NOTE(review): the original bound the key list to a throwaway name and
    popped the dict itself from itself; restored to pop each ignored key.
    """
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)  # default=None: missing keys are fine


# Upstream-style alias used by the converter below.
remove_ignore_keys_ = _UpperCamelCase
def _UpperCamelCase(name):
    """Map an original DPT checkpoint key to the HF DPT state-dict key.

    Each rule rewrites `name` in place, so later rules see earlier rewrites
    (the rule order matters).  NOTE(review): the original discarded every
    `name.replace` result into a throwaway local and returned the input
    unchanged; the `name =` rebinding is restored throughout.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""", """dpt.encoder""")
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""", """dpt.embeddings""")
    if "patch_embed" in name:
        name = name.replace("""patch_embed""", """""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """position_embeddings""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""", """projection""")
    if "blocks" in name:
        name = name.replace("""blocks""", """layer""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""", """head""")
    if "scratch" in name:
        name = name.replace("""scratch""", """neck""")
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""", """convs.0""")
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""", """convs.1""")
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""", """convs.2""")
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""", """convs.3""")
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""") : len("""neck.refinenet""") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''', f'''fusion_stage.layers.{abs(layer_idx-4)}''')
    if "out_conv" in name:
        name = name.replace("""out_conv""", """projection""")
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""", """residual_layer1""")
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""", """residual_layer2""")
    if "conv1" in name:
        name = name.replace("""conv1""", """convolution1""")
    if "conv2" in name:
        name = name.replace("""conv2""", """convolution2""")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""")
    if "pretrained" in name:
        name = name.replace("""pretrained""", """dpt""")
    if "bn" in name:
        name = name.replace("""bn""", """batch_norm""")
    if "head" in name:
        name = name.replace("""head""", """head.head""")
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""", """layernorm""")
    if "auxlayer" in name:
        name = name.replace("""auxlayer""", """auxiliary_head.head""")
    if "backbone" in name:
        name = name.replace("""backbone""", """backbone.bit.encoder""")
    if ".." in name:
        name = name.replace("""..""", """.""")
    if "stem.conv" in name:
        name = name.replace("""stem.conv""", """bit.embedder.convolution""")
    if "blocks" in name:
        name = name.replace("""blocks""", """layers""")
    if "convolution" in name and "backbone" in name:
        name = name.replace("""convolution""", """conv""")
    if "layer" in name and "backbone" in name:
        name = name.replace("""layer""", """layers""")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("""backbone.bit.encoder.bit""", """backbone.bit""")
    if "embedder.conv" in name:
        name = name.replace("""embedder.conv""", """embedder.convolution""")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("""backbone.bit.encoder.stem.norm""", """backbone.bit.embedder.norm""")
    return name


# Upstream-style alias used by the converter below.
rename_key = _UpperCamelCase
def _UpperCamelCase(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries (in place).

    NOTE(review): the original sliced the weights into throwaway locals and
    never wrote them back; the target key names are restored from the upstream
    conversion script — confirm against DPT's attention module naming.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]


# Upstream-style alias used by the converter below.
read_in_q_k_v = _UpperCamelCase
def _UpperCamelCase():
    """Download the standard COCO test image used for conversion sanity checks.

    NOTE(review): the original bound the URL and image to throwaway locals and
    returned the unbound name `im`; restored.  Performs network I/O.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Upstream-style alias used by the converter below.
prepare_img = _UpperCamelCase
@torch.no_grad()
def _UpperCamelCase(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Convert an original DPT checkpoint into the HF DPT format.

    NOTE(review): the original bound every local to a throwaway name and passed
    its parameters as every call argument; names restored from the reads and
    the upstream conversion script.  `model_name` is accepted for CLI
    compatibility but unused (the hub repo id is hard-coded, as upstream).
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="""cpu""")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if """ade""" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="""pt""")

    # forward pass
    outputs = model(**encoding).logits if """ade""" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="""bicubic""", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'''Saving model to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""")
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""")


# Upstream-style alias; the __main__ guard calls this name.
convert_dpt_checkpoint = _UpperCamelCase
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser and parsed args to a
    # throwaway global while the calls below read `parser`/`args` (NameError),
    # and the closing line carried garbled `| 113 | 0 |` residue; both fixed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    parser.add_argument(
        '--show_prediction',
        action='store_true',
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCamelCase =logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    """Build (train, valid) DataLoaders over synthetic ``y = a*x + b + noise`` data.

    NOTE(review): the original repeated one parameter name five times (a
    SyntaxError) and bound the datasets/loaders to throwaway locals; parameter
    names restored from the reads (`a`, `b`, `batch_size`) — shuffle flags
    (train=True, valid=False) follow the upstream script, confirm there.
    """

    def get_dataset(n_batches):
        # One input column and the matching noisy linear targets.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


# Upstream-style alias; the test class below calls `dummy_dataloaders()`.
dummy_dataloaders = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a short MSE training loop and return the random draws made per batch.

    NOTE(review): the original repeated one parameter name six times (a
    SyntaxError) and lost the batch unpacking / loss binding; names restored
    from the body's reads.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


# Upstream-style alias; the test class below calls `train(...)`.
train = SCREAMING_SNAKE_CASE_
class _lowerCamelCase ( nn.Module ):
    """Minimal linear model ``y = a * x + b`` used by the checkpointing tests."""

    def __init__(self):
        super().__init__()
        # NOTE(review): the original assigned the parameters to throwaway
        # locals, so the module had no registered parameters and forward's
        # reads of self.a / self.b raised AttributeError; register them here.
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        # Restored as `forward` (the original name broke nn.Module.__call__).
        return x * self.a + self.b


# Star-importable alias (leading-underscore names are skipped by `import *`).
DummyModel = _lowerCamelCase
class _lowerCamelCase ( unittest.TestCase ):
    """Tests for accelerate checkpointing (``Accelerator.save_state`` / ``load_state``).

    NOTE(review): obfuscation artifacts preserved byte-for-byte below — this class
    reuses the name of the model class above, every test method is named
    ``__SCREAMING_SNAKE_CASE`` (so earlier defs are shadowed and unittest will not
    discover any of them as ``test_*``), and assignment targets were renamed to
    ``UpperCamelCase__`` while later statements read the original variable names
    (``model``, ``optimizer``, ``accelerator`` …). Only comments/docstrings added.
    """
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        """Saves two states with ``total_limit=1`` and asserts only one directory remains."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            UpperCamelCase__ : Optional[int] = DummyModel()
            UpperCamelCase__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = dummy_dataloaders()
            UpperCamelCase__ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=__SCREAMING_SNAKE_CASE , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
            # Train baseline
            UpperCamelCase__ : List[str] = Accelerator(project_config=__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[Any] = accelerator.prepare(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            # total_limit=1 should have pruned the first checkpoint.
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Resume-training round-trip using explicit save/load paths.

        Saves an 'initial' state, trains 3 epochs recording model/optimizer state,
        then rebuilds everything, loads 'initial', asserts the restored values
        match, trains 2 epochs, saves/loads 'checkpoint', trains 1 more epoch and
        asserts the final state matches the uninterrupted run.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            UpperCamelCase__ : Tuple = DummyModel()
            UpperCamelCase__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = dummy_dataloaders()
            # Train baseline
            UpperCamelCase__ : Optional[Any] = Accelerator()
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = accelerator.prepare(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # Save initial
            UpperCamelCase__ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '''initial''' )
            accelerator.save_state(__SCREAMING_SNAKE_CASE )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : Tuple = model.a.item(), model.b.item()
            UpperCamelCase__ : List[str] = optimizer.state_dict()
            UpperCamelCase__ : Tuple = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : str = model.a.item(), model.b.item()
            UpperCamelCase__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(4_2 )
            UpperCamelCase__ : Optional[Any] = DummyModel()
            UpperCamelCase__ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase__ ,UpperCamelCase__ : List[Any] = dummy_dataloaders()
            UpperCamelCase__ : Dict = Accelerator()
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = accelerator.prepare(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            accelerator.load_state(__SCREAMING_SNAKE_CASE )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : str = model.a.item(), model.b.item()
            UpperCamelCase__ : Tuple = optimizer.state_dict()
            # NOTE(review): assert operands were obfuscated; upstream compares the
            # restored a/b/optimizer state against the values saved above — confirm.
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : Optional[Any] = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # Save everything
            UpperCamelCase__ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoint''' )
            accelerator.save_state(__SCREAMING_SNAKE_CASE )
            # Load everything back in and make sure all states work
            accelerator.load_state(__SCREAMING_SNAKE_CASE )
            test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : str = model.a.item(), model.b.item()
            UpperCamelCase__ : Any = optimizer.state_dict()
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Same resume round-trip as above but with ``automatic_checkpoint_naming``,
        loading from ``checkpoints/checkpoint_0`` and ``checkpoints/checkpoint_1``."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            UpperCamelCase__ : Union[str, Any] = DummyModel()
            UpperCamelCase__ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase__ ,UpperCamelCase__ : int = dummy_dataloaders()
            UpperCamelCase__ : str = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
            # Train baseline
            UpperCamelCase__ : Tuple = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = accelerator.prepare(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # Save initial
            accelerator.save_state()
            ((UpperCamelCase__) ,(UpperCamelCase__)) : Optional[Any] = model.a.item(), model.b.item()
            UpperCamelCase__ : Optional[int] = optimizer.state_dict()
            UpperCamelCase__ : Dict = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : List[str] = model.a.item(), model.b.item()
            UpperCamelCase__ : Union[str, Any] = optimizer.state_dict()
            # Train partially
            set_seed(4_2 )
            UpperCamelCase__ : Dict = DummyModel()
            UpperCamelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = dummy_dataloaders()
            # iteration=1 so the next automatic save goes to checkpoint_1.
            UpperCamelCase__ : Dict = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : Optional[int] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = accelerator.prepare(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoints''' , '''checkpoint_0''' ) )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : Tuple = model.a.item(), model.b.item()
            UpperCamelCase__ : Optional[Any] = optimizer.state_dict()
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : str = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoints''' , '''checkpoint_1''' ) )
            test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            ((UpperCamelCase__) ,(UpperCamelCase__)) : int = model.a.item(), model.b.item()
            UpperCamelCase__ : List[Any] = optimizer.state_dict()
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        """register_for_checkpointing must reject objects without state_dict/load_state_dict;
        the raised message should name the two plain tensors (indices 0 and 1) only."""
        UpperCamelCase__ : int = torch.tensor([1, 2, 3] )
        UpperCamelCase__ : Optional[int] = torch.tensor([2, 3, 4] )
        UpperCamelCase__ : List[str] = DummyModel()
        UpperCamelCase__ : Tuple = torch.optim.Adam(net.parameters() )
        UpperCamelCase__ : int = Accelerator()
        with self.assertRaises(__SCREAMING_SNAKE_CASE ) as ve:
            accelerator.register_for_checkpointing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : int = str(ve.exception )
        self.assertTrue('''Item at index 0''' in message )
        self.assertTrue('''Item at index 1''' in message )
        self.assertFalse('''Item at index 2''' in message )
        self.assertFalse('''Item at index 3''' in message )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        """LR scheduler state must change after training and be restored by load_state."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            UpperCamelCase__ : Any = DummyModel()
            UpperCamelCase__ : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase__ : List[str] = torch.optim.lr_scheduler.StepLR(__SCREAMING_SNAKE_CASE , step_size=1 , gamma=0.99 )
            UpperCamelCase__ ,UpperCamelCase__ : List[Any] = dummy_dataloaders()
            UpperCamelCase__ : Any = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
            # Train baseline
            UpperCamelCase__ : str = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[str] = accelerator.prepare(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # Save initial
            accelerator.save_state()
            UpperCamelCase__ : Optional[Any] = scheduler.state_dict()
            train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            self.assertNotEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoints''' , '''checkpoint_0''' ) )
            self.assertEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """With ``total_limit=2``, after 11 saves only checkpoint_9 and checkpoint_10 survive."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            UpperCamelCase__ : List[str] = DummyModel()
            UpperCamelCase__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE , total_limit=2 )
            # Train baseline
            UpperCamelCase__ : Any = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : Any = accelerator.prepare(__SCREAMING_SNAKE_CASE )
            # Save 3 states:
            for _ in range(1_1 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoints''' , '''checkpoint_0''' ) ) )
            self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoints''' , '''checkpoint_9''' ) ) )
            self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , '''checkpoints''' , '''checkpoint_10''' ) ) )
    @require_cuda
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Re-run this file under torchrun across all visible GPUs (multi-process check)."""
        UpperCamelCase__ : Optional[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
    # Multi-process entry point (launched via torchrun by the test above):
    # exercises save_state/load_state with explicit optimizer map_location values.
    # NOTE(review): assignment targets were obfuscated to ``lowerCamelCase`` while
    # later statements read the original names (savedir, model, optimizer, …).
    lowerCamelCase ="/tmp/accelerate/state_checkpointing"
    lowerCamelCase =DummyModel()
    lowerCamelCase =torch.optim.Adam(params=model.parameters(), lr=1e-3)
    lowerCamelCase =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    lowerCamelCase , lowerCamelCase =dummy_dataloaders()
    lowerCamelCase =ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    lowerCamelCase =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    # Only the main process prepares a clean save directory.
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    # NOTE(review): model/optimizer are prepared a second time here — confirm intentional.
    lowerCamelCase , lowerCamelCase =accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        lowerCamelCase =group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    lowerCamelCase =model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state: map_location="cpu" must place optimizer tensors on the CPU.
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        lowerCamelCase =group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state: map_location="on_device" must follow the accelerator's device.
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        lowerCamelCase =group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error: any other map_location string must raise TypeError.
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 285 |
def SCREAMING_SNAKE_CASE_(string: str, separator: str = " ") -> list:
    """Split ``string`` on every occurrence of ``separator``, like ``str.split(separator)``.

    The original (obfuscated) version repeated the same parameter name (a
    SyntaxError), never bound the locals it later read, and dropped the empty
    final segment when the string ended with the separator; all fixed here so
    the result matches ``string.split(separator)`` exactly.

    Args:
        string: Text to split.
        separator: Single-character (or longer, matched per-char here: one
            character) delimiter; defaults to a space.

    Returns:
        List of segments, including empty segments for leading/trailing/adjacent
        separators.

    >>> SCREAMING_SNAKE_CASE_("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> SCREAMING_SNAKE_CASE_("Hello there")
    ['Hello', 'there']
    >>> SCREAMING_SNAKE_CASE_("11/22/63", separator="/")
    ['11', '22', '63']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
    # Always emit the remainder — this keeps the trailing empty segment that
    # str.split produces for strings ending in the separator.
    split_words.append(string[last_index:])
    return split_words
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 285 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# NOTE(review): the assignment target is obfuscated; nothing in this chunk reads
# this name, and upstream diffusers tests set a torch backend flag here — confirm.
UpperCamelCase_ : Any = False
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast tests for StableDiffusionAttendAndExcitePipeline on tiny random components.

    NOTE(review): obfuscation artifacts preserved byte-for-byte — every class
    attribute is named ``snake_case`` (so only the last assignment survives),
    methods share the name ``lowerCamelCase__`` (earlier defs are shadowed),
    and several calls pass ``_a`` before it is bound. Only docstrings/comments added.
    """
    snake_case = StableDiffusionAttendAndExcitePipeline
    snake_case = False
    snake_case = TEXT_TO_IMAGE_PARAMS
    snake_case = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
    snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
    snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def lowerCamelCase__ ( cls : Optional[int] ) -> Tuple:
        """Class setup: enable torch deterministic algorithms for reproducible outputs."""
        super().setUpClass()
        torch.use_deterministic_algorithms(_a )
    @classmethod
    def lowerCamelCase__ ( cls : int ) -> Optional[int]:
        """Class teardown: restore the deterministic-algorithms setting."""
        super().tearDownClass()
        torch.use_deterministic_algorithms(_a )
    def lowerCamelCase__ ( self : Dict ) -> Dict:
        """Build a dict of tiny seeded pipeline components (unet/scheduler/vae/text encoder/tokenizer)."""
        torch.manual_seed(0 )
        A_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
        A_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_a , set_alpha_to_one=_a , )
        torch.manual_seed(0 )
        A_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        A_ = CLIPTextModel(_a )
        A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # NOTE(review): the keys below read names (unet, scheduler, …) that the
        # obfuscated assignments above no longer bind.
        A_ = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def lowerCamelCase__ ( self : int , _snake_case : Optional[Any] , _snake_case : Optional[int]=0 ) -> Tuple:
        """Build deterministic pipeline call kwargs (prompt, token indices, generator, 1 step)."""
        if str(_a ).startswith("mps" ):
            A_ = torch.manual_seed(_a )
        else:
            A_ = torch.Generator(device=_a ).manual_seed(_a )
        A_ = A_ = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def lowerCamelCase__ ( self : List[str] ) -> Any:
        """End-to-end CPU inference: output shape (1, 64, 64, 3) and pixel slice within 1e-3."""
        A_ = "cpu"
        A_ = self.get_dummy_components()
        A_ = self.pipeline_class(**_a )
        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        A_ = self.get_dummy_inputs(_a )
        A_ = pipe(**_a ).images
        A_ = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        A_ = np.array(
            [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
        A_ = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_a , 1e-3 )
    def lowerCamelCase__ ( self : str ) -> List[Any]:
        """Mixin wrapper: CPU-offload forward pass with a loosened tolerance."""
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def lowerCamelCase__ ( self : Dict ) -> Tuple:
        """Batch-consistency check restricted to small batch sizes."""
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def lowerCamelCase__ ( self : Any ) -> str:
        """Single vs batched inference must agree within 7e-4."""
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
    def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
        """Dict and tuple pipeline outputs must be equivalent within 3e-3."""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def lowerCamelCase__ ( self : List[str] ) -> Tuple:
        """pt/np/pil output formats must be equivalent within 5e-4."""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
        """Save/load round-trip to a local directory must preserve outputs within 5e-4."""
        super().test_save_load_local(expected_max_difference=5e-4 )
    def lowerCamelCase__ ( self : int ) -> Tuple:
        """Save/load with optional components removed must preserve outputs within 4e-4."""
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU test for Attend-and-Excite against a stored reference image.

    NOTE(review): obfuscated — methods share one name (earlier defs shadowed) and
    ``_a`` is read before being bound in several calls. Code preserved byte-for-byte.
    """
    @classmethod
    def lowerCamelCase__ ( cls : Optional[Any] ) -> Dict:
        """Class setup: enable torch deterministic algorithms."""
        super().setUpClass()
        torch.use_deterministic_algorithms(_a )
    @classmethod
    def lowerCamelCase__ ( cls : List[str] ) -> Union[str, Any]:
        """Class teardown: restore the deterministic-algorithms setting."""
        super().tearDownClass()
        torch.use_deterministic_algorithms(_a )
    def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
        """Per-test teardown: free Python and CUDA memory between runs."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
        """Generate 'an elephant with glasses' with SD v1-4 and compare to a stored
        reference image with a max-abs-difference tolerance of 5e-1."""
        A_ = torch.manual_seed(51 )
        A_ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , safety_checker=_a , torch_dtype=torch.floataa )
        pipe.to("cuda" )
        A_ = "a painting of an elephant with glasses"
        A_ = [5, 7]
        A_ = pipe(
            prompt=_a , token_indices=_a , guidance_scale=7.5 , generator=_a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
        A_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 721 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module logger.
UpperCamelCase_ : Any = logging.get_logger(__name__)
# File names expected in a saved tokenizer directory.
UpperCamelCase_ : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Remote vocab/tokenizer files for the published RetriBERT checkpoint.
UpperCamelCase_ : List[str] = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum input length (in tokens) per checkpoint.
UpperCamelCase_ : Union[str, Any] = {
    '''yjernite/retribert-base-uncased''': 512,
}
# Per-checkpoint constructor defaults.
UpperCamelCase_ : str = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    r"""Fast RetriBERT tokenizer (backed by HuggingFace *tokenizers*), identical in
    behavior to a BERT fast tokenizer: end-to-end punctuation splitting + wordpiece.

    NOTE(review): the obfuscated original declared every class attribute as
    ``snake_case`` (so only the last survived), gave all three methods one shared
    name, used duplicate ``_snake_case`` parameter names (a SyntaxError) and an
    undefined base class ``_lowercase``. The conventional fast-tokenizer names
    are restored below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Create the fast tokenizer; see ``PreTrainedTokenizerFast`` for kwargs."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized backend normalizer disagrees with the requested
        # options, rebuild it so lowercase/strip-accents/CJK handling matches
        # the constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` for a pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type mask: 0s over ``[CLS] A [SEP]``, 1s over ``B [SEP]`` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the backend model files to ``save_directory`` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 482 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the CPM-Ant model package.
# NOTE(review): the obfuscated original bound this dict to a throwaway name,
# rebound it (instead of registering the modeling symbols) in the torch branch,
# and never installed the _LazyModule into sys.modules; all three restored here.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional; without it the modeling symbols are simply absent.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader over _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 531 |
"""simple docstring"""
def lowerCamelCase_(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Project Euler 71: numerator of the largest fraction strictly below
    ``numerator/denominator`` among all fractions with denominator <= ``limit``.

    The original (obfuscated) signature repeated one parameter name three times
    (a SyntaxError) and never bound the accumulators it later read; restored here.
    Fractions are compared by cross-multiplication, so no floats are involved.

    Args:
        numerator: Numerator of the bounding fraction.
        denominator: Denominator of the bounding fraction.
        limit: Largest denominator to consider.

    Returns:
        The numerator of the best fraction found.

    >>> lowerCamelCase_(3, 7, 8)
    2
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # Largest p with p/q <= numerator/denominator ...
        current_numerator = current_denominator * numerator // denominator
        # ... stepped down when equal, so the candidate is strictly below the bound.
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # p1/q1 > p2/q2  <=>  p1*q2 > q1*p2 (all positive).
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
    # The original called an undefined name ``solution``; the function above was
    # obfuscated to ``lowerCamelCase_``, so call it under its actual name.
    print(lowerCamelCase_(numerator=3, denominator=7, limit=1_000_000))
| 506 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a ( unittest.TestCase ):
    """Offline/cache behavior tests for tokenizers.

    NOTE(review): obfuscation artifacts preserved byte-for-byte — assignment
    targets were renamed to ``_a`` while later statements read the original
    names (e.g. the mock's attributes). Only docstrings/comments added.
    """

    def __UpperCamelCase ( self ) -> Optional[Any]:
        """A cached slow tokenizer must still load when the Hub returns HTTP 500."""
        # A mock response for an HTTP head request to emulate server down
        _a : int = mock.Mock()
        _a : Optional[int] = 5_0_0
        _a : Any = {}
        _a : Optional[int] = HTTPError
        _a : str = {}

        # Download this model to make sure it's in the cache.
        _a : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=lowerCamelCase_ ) as mock_head:
            _a : int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def __UpperCamelCase ( self ) -> List[str]:
        """Same as above for a fast (Rust-backed) tokenizer."""
        # A mock response for an HTTP head request to emulate server down
        _a : Tuple = mock.Mock()
        _a : Union[str, Any] = 5_0_0
        _a : int = {}
        _a : Optional[int] = HTTPError
        _a : List[str] = {}

        # Download this model to make sure it's in the cache.
        _a : str = GPTaTokenizerFast.from_pretrained('gpt2' )

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=lowerCamelCase_ ) as mock_head:
            _a : Union[str, Any] = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def __UpperCamelCase ( self ) -> List[str]:
        """Legacy: loading a tokenizer directly from a single downloaded file."""
        # This test is for deprecated behavior and can be removed in v5
        try:
            _a : Union[str, Any] = tempfile.mktemp()
            with open(lowerCamelCase_ , 'wb' ) as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , lowerCamelCase_ )

            _a : List[str] = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
        finally:
            os.remove(lowerCamelCase_ )

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json' ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json' , 'wb' ) as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , lowerCamelCase_ )
            _a : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.

        finally:
            os.remove('tokenizer.json' )

    def __UpperCamelCase ( self ) -> Any:
        """Legacy: loading a tokenizer straight from a URL must still work."""
        # This test is for deprecated behavior and can be removed in v5
        _a : List[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class a ( unittest.TestCase ):
    """push_to_hub round-trip tests against the staging Hub (slow, fast and
    custom/dynamic tokenizers). Obfuscated assignment targets preserved as-is."""

    # Tiny shared vocabulary written to disk by each test.
    __lowerCAmelCase : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]

    @classmethod
    def __UpperCamelCase ( cls ) -> Optional[Any]:
        """Authenticate against the staging Hub once for the whole class."""
        _a : str = TOKEN
        HfFolder.save_token(lowerCamelCase_ )

    @classmethod
    def __UpperCamelCase ( cls ) -> Union[str, Any]:
        """Best-effort cleanup of the repos the tests may have created."""
        try:
            delete_repo(token=cls._token , repo_id='test-tokenizer' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
        except HTTPError:
            pass

    def __UpperCamelCase ( self ) -> str:
        """Push a user-namespace repo via push_to_hub and via save_pretrained(push_to_hub=True)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _a : List[Any] = os.path.join(lowerCamelCase_ , 'vocab.txt' )
            with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            _a : Dict = BertTokenizer(lowerCamelCase_ )

        tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
        _a : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

        # Reset repo
        delete_repo(token=self._token , repo_id='test-tokenizer' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCamelCase_ , repo_id='test-tokenizer' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )

        _a : Union[str, Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

    def __UpperCamelCase ( self ) -> List[str]:
        """Same round-trip for an organization-namespace repo."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _a : int = os.path.join(lowerCamelCase_ , 'vocab.txt' )
            with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            _a : Any = BertTokenizer(lowerCamelCase_ )

        tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
        _a : Any = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                lowerCamelCase_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )

        _a : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

    @require_tokenizers
    def __UpperCamelCase ( self ) -> int:
        """Push custom (slow then slow+fast) tokenizer classes and reload them
        with trust_remote_code, checking the dynamic class names round-trip."""
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            _a : List[Any] = os.path.join(lowerCamelCase_ , 'vocab.txt' )
            with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            _a : str = CustomTokenizer(lowerCamelCase_ )

        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )

        _a : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            _a : Optional[Any] = os.path.join(lowerCamelCase_ , 'vocab.txt' )
            with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )

            _a : Union[str, Any] = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
            bert_tokenizer.save_pretrained(lowerCamelCase_ )
            _a : Optional[Any] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )

        tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )

        _a : Any = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
        _a : int = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''' , use_fast=lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class a(unittest.TestCase):
    """Unit tests for the tokenizers ``Trie`` helper (adds tokens, splits text on them)."""

    def test_trie(self) -> None:
        """Adding a token builds nested dicts; a shorter prefix adds a terminator key."""
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'', 1}}}}}}}}} if False else {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self) -> None:
        """Splitting with no added tokens returns the text unsplit."""
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self) -> None:
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self) -> None:
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self) -> None:
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self) -> None:
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self) -> None:
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self) -> None:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
| 700 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 1_0_0,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Simulated annealing over a ``SearchProblem``-like state.

    Starting from ``search_prob``, repeatedly moves to a random neighbor: an
    improving move is always taken, a worsening one is taken with probability
    e^(change / temperature). The temperature decays geometrically by
    ``rate_of_decrease`` each iteration; the search stops when it falls below
    ``threshold_temp`` or no acceptable neighbor exists. Returns the best
    state seen (``find_max=False`` minimizes by negating the score change).
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        """Convex test function f(x, y) = x^2 + y^2."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f'''and 50 > y > - 5 found via hill climbing: {local_max.score()}'''
    )

    def test_f2(x, y):
        """Test function f(x, y) = 3x^2 - 6y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f'''{local_max.score()}'''
    )
| 424 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the last ``num_runs`` completed-or-not runs of the scheduled (daily) CI workflow on `main`."""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = '''636036'''

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or None if none completed."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['''id''']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run into ``output_dir``."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last daily CI artifacts and return ``{artifact_name: {filename: text}}`` from their zips."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('''UTF-8''')
    return results
| 632 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder: projects a CLIP-style prefix embedding into GPT-2's embedding
    space and decodes tokens with beam search (UniDiffuser-style captioning).
    """

    # Checkpoint keys matching these patterns are ignored on load (GPT-2 attention buffers).
    _keys_to_ignore_on_load_unexpected = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 5_0_2_5_7,
        n_positions: int = 1_0_2_4,
        n_embd: int = 7_6_8,
        n_layer: int = 1_2,
        n_head: int = 1_2,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.0_2,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                F" `n_embd`: {n_embd} are not equal.")

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional bottleneck around the prefix; identity when no hidden dim is requested.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        # NOTE(review): `GPTaConfig`/`GPTaLMHeadModel` come from this file's (scrambled)
        # transformers import; they correspond to the GPT-2 config/LM-head classes.
        gpt_config = GPTaConfig(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(
        self,
        input_ids,
        prefix_embeds,
        attention_mask=None,
        labels=None,
    ):
        """Decode prefix + token embeddings through the LM; also return the encoded prefix when a bottleneck exists."""
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Prefix positions get dummy (zero) labels so lengths line up.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size, device):
        """Return a (batch_size, prefix_length) tensor of zero token ids."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Encode a prefix through the (optional) bottleneck."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per feature row via beam search; returns stacked tokens and lengths."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self, input_ids=None, input_embeds=None, device=None, beam_size=5, entry_length=6_7, temperature=1.0, eos_token_id=None, ):
        """Beam-search decode from token ids or embeddings; returns (tokens sorted by score, lengths)."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed the beams with the top-k tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams only extend with a zero-score padding token.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 632 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class A__ :
'''simple docstring'''
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: list[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []})
for keyword in keywords:
self.add_keyword(_SCREAMING_SNAKE_CASE)
self.set_fail_transitions()
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str) -> None:
"""simple docstring"""
__lowerCAmelCase : List[Any] = 0
for character in keyword:
__lowerCAmelCase : Union[str, Any] = self.find_next_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
})
self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
__lowerCAmelCase : Optional[int] = len(self.adlist) - 1
else:
__lowerCAmelCase : Optional[Any] = next_state
self.adlist[current_state]["output"].append(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> None:
"""simple docstring"""
__lowerCAmelCase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = 0
while q:
__lowerCAmelCase : str = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.adlist[r]["fail_state"]
while (
self.find_next_state(_SCREAMING_SNAKE_CASE , self.adlist[child]["value"]) is None
and state != 0
):
__lowerCAmelCase : int = self.adlist[state]["fail_state"]
__lowerCAmelCase : str = self.find_next_state(
_SCREAMING_SNAKE_CASE , self.adlist[child]["value"])
if self.adlist[child]["fail_state"] is None:
__lowerCAmelCase : str = 0
__lowerCAmelCase : int = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: str) -> dict[str, list[int]]:
"""simple docstring"""
__lowerCAmelCase : dict = {} # returns a dict with keywords and list of its occurrences
__lowerCAmelCase : Optional[Any] = 0
for i in range(len(_SCREAMING_SNAKE_CASE)):
while (
self.find_next_state(_SCREAMING_SNAKE_CASE , string[i]) is None
and current_state != 0
):
__lowerCAmelCase : Any = self.adlist[current_state]["fail_state"]
__lowerCAmelCase : Union[str, Any] = self.find_next_state(_SCREAMING_SNAKE_CASE , string[i])
if next_state is None:
__lowerCAmelCase : List[str] = 0
else:
__lowerCAmelCase : List[str] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__lowerCAmelCase : Optional[Any] = []
result[key].append(i - len(_SCREAMING_SNAKE_CASE) + 1)
return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__(ABC):
    """Abstract base class that every CLI subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on the given argument parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
def factorial(num: int) -> int:
    """Return ``num!`` computed iteratively (``num >= 0``)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 1_00) -> int:
    """Project Euler 20: sum of the digits of ``num!`` (default 100)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
# Script entry point: read an integer from stdin and print the digit sum of its factorial.
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 668 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (reference model, prepared DDP model, prepared dataloader) for the regression setup."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader (batch size 16)."""
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        # Tokenize sentence pairs; padding happens later in the collator.
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )

    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=1_28, return_tensors='''pt''')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": ..., "no": ...} model/dataloader/device triples, accelerator) for the MRPC test."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run the model over the dataloader, gathering per-process logits/targets; return concatenated tensors."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that `gather_for_metrics` yields exactly ``num_samples`` predictions (no duplicated padding)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare single-process MRPC metrics to distributed metrics gathered via `gather_for_metrics`."""
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    """Run the gather_for_metrics test suite across split/dispatch batch configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_12)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Entry point used by TPU spawners (e.g. ``xla_spawn``); ``index`` is the process index."""
    main()
# Script entry point when launched directly (e.g. via `accelerate launch`).
if __name__ == "__main__":
    main()
| 668 | 1 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU with Intel Extension for PyTorch (IPEX) optimizations.
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 9_99
encoder_hidden_status = torch.randn(2, 77, 7_68)
input_example = (sample, timestep, encoder_hidden_status)
try:
    # Tracing with a sample input gives better optimization when supported.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 6_66
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Map of pretrained DETA checkpoints to their hosted config files.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __A(PretrainedConfig):
    """Configuration for the DETA object-detection model (``model_type='deta'``)."""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias for the number of encoder attention heads."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for ``d_model``."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 415 | 0 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Atbash-encrypt ``sequence`` character by character using ordinal arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # Uppercase: 'A' (65) maps to 'Z' (90), so mirrored code is 155 - code.
            output += chr(1_55 - extract)
        elif 97 <= extract <= 1_22:
            # Lowercase: 'a' (97) maps to 'z' (122), so mirrored code is 219 - code.
            output += chr(2_19 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Atbash-encrypt ``sequence`` via a mirrored-alphabet lookup table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)
def benchmark():
    """Time ``atbash_slow`` vs ``atbash`` on ``string.printable`` and print the results."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(F"""> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds""")
    print(F"""> atbash(): {timeit('atbash(printable)', setup=setup)} seconds""")
if __name__ == "__main__":
    # Demonstrate the cipher on a few sample strings, then time both implementations.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f'{example} encrypted in atbash: {atbash(example)}')
    benchmark()
| 95 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an OpenAI GPT TensorFlow checkpoint to PyTorch weight + config files."""
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--openai_checkpoint_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the TensorFlow checkpoint path.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--openai_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 380 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation warning at import time: this module is a forwarding shim and
# the real implementations live in `diffusers.pipelines.pipeline_utils`.
deprecate(
    '''pipelines_utils''',
    '''0.22.0''',
    '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 716 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps each YOLOS submodule to the public names it exports.
# The obfuscated original assigned every piece to throwaway variables and then
# referenced an undefined `_import_structure`, so the lazy module could never
# be built; this restores the standard transformers `__init__` pattern.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

# Vision-dependent members are only registered when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

# Torch-dependent members are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the lazy module below is used.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 269 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
# The three module constants below were all mangled to the same name `_A`
# (each assignment clobbered the previous one) while the decorator referenced
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION`; distinct names are restored.
_CITATION = """\
@inproceedings{lin-2004-rouge,
    title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
    author = \"Lin, Chin-Yew\",
    booktitle = \"Text Summarization Branches Out\",
    month = jul,
    year = \"2004\",
    address = \"Barcelona, Spain\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://www.aclweb.org/anthology/W04-1013\",
    pages = \"74--81\",
}
"""

_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""

_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    rouge_types: A list of rouge types to calculate.
        Valid names:
        `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
        `\"rougeL\"`: Longest common subsequence based scoring.
        `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
        See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregates if this is set to True
Returns:
    rouge1: rouge_1 (precision, recall, f1),
    rouge2: rouge_2 (precision, recall, f1),
    rougeL: rouge_l (precision, recall, f1),
    rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
    >>> rouge = datasets.load_metric('rouge')
    >>> predictions = [\"hello there\", \"general kenobi\"]
    >>> references = [\"hello there\", \"general kenobi\"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    >>> print(results[\"rouge1\"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results[\"rouge1\"].mid.fmeasure)
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE_(datasets.Metric):
    """ROUGE metric wrapping Google Research's `rouge_score` reference implementation."""

    def _info(self) -> datasets.MetricInfo:
        # `datasets.Metric` calls `_info()` to discover the metric's input schema;
        # the obfuscated `_snake_case` name broke that hook.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score each (reference, prediction) pair; aggregate via bootstrap or collect raw scores.

        The obfuscated original declared every parameter as `lowercase`
        (duplicate parameter names — a SyntaxError); the names are restored
        from the docstring above and from the body's usage.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # Without aggregation, return per-pair score lists keyed by rouge type.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 158 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , unittest.TestCase ):
    # NOTE(review): this class looks mechanically renamed — both mixin bases are the
    # placeholder `snake_case`, every method is named `_snake_case` (later defs shadow
    # earlier ones, so unittest cannot discover them individually), and the
    # get-inputs def reuses the parameter name `lowercase` twice, which is a
    # SyntaxError. The original identifiers must be restored before this runs.
    # Fast tests for the DeepFloyd IF text-to-image pipeline (dummy components).
    __a : Tuple = IFPipeline
    __a : List[Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    __a : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    __a : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def _snake_case ( self ) -> List[str]:
        '''Delegate to the shared IF tester mixin for the dummy pipeline components.'''
        return self._get_dummy_components()
    def _snake_case ( self , lowercase , lowercase=0 ) -> int:
        '''Build the standard dummy call kwargs: prompt, seeded generator, 2 steps, numpy output.'''
        # NOTE(review): duplicate `lowercase` parameters (SyntaxError); results are
        # assigned to a placeholder but read back below as `generator` / `inputs`.
        if str(lowercase ).startswith('''mps''' ):
            __SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(lowercase )
        else:
            __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowercase ).manual_seed(lowercase )
        __SCREAMING_SNAKE_CASE : List[str] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def _snake_case ( self ) -> int:
        '''Save/load round-trip with optional components removed.'''
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def _snake_case ( self ) -> Tuple:
        '''Half-precision save/load round-trip (CUDA only); method name is mangled (float16).'''
        super().test_save_load_floataa(expected_max_diff=1e-1 )
    def _snake_case ( self ) -> Dict:
        '''Attention slicing must match the unsliced forward pass within tolerance.'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def _snake_case ( self ) -> Any:
        '''Plain local save/load round-trip.'''
        self._test_save_load_local()
    def _snake_case ( self ) -> Optional[int]:
        '''Batched inference must match single-sample inference within tolerance.'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def _snake_case ( self ) -> Optional[Any]:
        '''xFormers attention must match the default attention processor.'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    # NOTE(review): heavily mangled. All methods are named `_snake_case` (later defs
    # shadow earlier ones), the `_test_if*` defs each declare four parameters all
    # named `lowercase` (a SyntaxError), and `pipe_a` is used where two distinct
    # pipelines were clearly intended — note the duplicated
    # `enable_model_cpu_offload()` / `set_attn_processor()` / `remove_all_hooks()`
    # lines and the separate stage-I / stage-II (`SuperResolution`) constructions.
    # Code is kept byte-identical; only documentation is added.
    def _snake_case ( self ) -> Optional[Any]:
        '''Release GPU memory between tests (presumably the unittest tearDown hook).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> Tuple:
        '''End-to-end smoke test: text2img, img2img, and inpainting IF pipelines,
        each as stage I + stage II super-resolution, sharing precomputed prompt
        embeddings so the T5 text encoder can be freed early.'''
        __SCREAMING_SNAKE_CASE : List[str] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
        __SCREAMING_SNAKE_CASE : Dict = IFSuperResolutionPipeline.from_pretrained(
            '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=lowercase , tokenizer=lowercase )
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to('''cuda''' )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        __SCREAMING_SNAKE_CASE : str = None
        __SCREAMING_SNAKE_CASE : Tuple = None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(lowercase , lowercase , lowercase , lowercase )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        __SCREAMING_SNAKE_CASE : List[str] = IFImgaImgPipeline(**pipe_a.components )
        __SCREAMING_SNAKE_CASE : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(lowercase , lowercase , lowercase , lowercase )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        __SCREAMING_SNAKE_CASE : str = IFInpaintingPipeline(**pipe_a.components )
        __SCREAMING_SNAKE_CASE : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(lowercase , lowercase , lowercase , lowercase )
    def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Any:
        '''Text-to-image: stage I (64x64), then stage II super-resolution (256x256).
        Checks output shape, peak VRAM, and pixel closeness to reference images.'''
        _start_torch_memory_measurement()
        __SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
            prompt_embeds=lowercase , negative_prompt_embeds=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , )
        __SCREAMING_SNAKE_CASE : List[str] = output.images[0]
        assert image.shape == (6_4, 6_4, 3)
        __SCREAMING_SNAKE_CASE : str = torch.cuda.max_memory_allocated()
        assert mem_bytes < 1_3 * 1_0**9
        __SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
        assert_mean_pixel_difference(lowercase , lowercase )
        # pipeline 2
        _start_torch_memory_measurement()
        __SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : Dict = pipe_a(
            prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , )
        __SCREAMING_SNAKE_CASE : Dict = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        __SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 1_0**9
        __SCREAMING_SNAKE_CASE : Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(lowercase , lowercase )
    def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
        '''Image-to-image: stage I on a 64x64 input, then stage II on 256x256.
        Same shape / peak-VRAM / reference-image checks as the text2img variant.'''
        _start_torch_memory_measurement()
        __SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Any = pipe_a(
            prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0]
        assert image.shape == (6_4, 6_4, 3)
        __SCREAMING_SNAKE_CASE : str = torch.cuda.max_memory_allocated()
        assert mem_bytes < 1_0 * 1_0**9
        __SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
        assert_mean_pixel_difference(lowercase , lowercase )
        # pipeline 2
        _start_torch_memory_measurement()
        __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : Tuple = pipe_a(
            prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , original_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , )
        __SCREAMING_SNAKE_CASE : Tuple = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 1_0**9
        __SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(lowercase , lowercase )
    def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
        '''Inpainting: stage I with image + mask at 64x64, then stage II at 256x256.
        Same shape / peak-VRAM / reference-image checks as the other variants.'''
        _start_torch_memory_measurement()
        __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Any = pipe_a(
            prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , mask_image=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , )
        __SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
        assert image.shape == (6_4, 6_4, 3)
        __SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 1_0 * 1_0**9
        __SCREAMING_SNAKE_CASE : Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
        assert_mean_pixel_difference(lowercase , lowercase )
        # pipeline 2
        _start_torch_memory_measurement()
        __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(lowercase )
        __SCREAMING_SNAKE_CASE : Dict = pipe_a(
            prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , mask_image=lowercase , original_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        __SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 1_0**9
        __SCREAMING_SNAKE_CASE : Dict = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(lowercase , lowercase )
def _start_torch_memory_measurement() -> None:
    """Reset CUDA memory accounting so a test can measure its own peak usage.

    The obfuscated original was named ``A_`` while the test class above calls
    ``_start_torch_memory_measurement()``, which raised NameError at runtime.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


# Backward-compatible alias for the previous (mangled) name.
A_ = _start_torch_memory_measurement
| 158 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
# The three constants below were all mangled to `lowercase__` (each assignment
# clobbered the previous) while the decorator referenced `_DESCRIPTION` /
# `_KWARGS_DESCRIPTION`; distinct names are restored. String contents unchanged.
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"

_DESCRIPTION = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"

_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE(datasets.Metric):
    """SQuAD v1 metric (exact match and F1) wrapping the official scoring script."""

    def _info(self) -> datasets.MetricInfo:
        # `datasets.Metric` calls `_info()` to discover the input schema; the
        # obfuscated `a_` name broke that hook.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        """Re-shape inputs into the SQuAD dataset format and delegate to `evaluate`.

        The obfuscated original declared both parameters as `__UpperCamelCase`
        (duplicate parameter names — a SyntaxError); names restored from the
        body's comprehensions.
        """
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 276 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with a truncated Maclaurin series.

    The obfuscated original was named ``__UpperCamelCase`` (shadowed by the
    cosine definition below) while ``__main__`` calls ``maclaurin_sin``, and its
    accuracy check was ``isinstance(accuracy, accuracy)``; both are fixed.

    Args:
        theta: Angle in radians; reduced modulo 2*pi before summing.
        accuracy: Number of series terms to sum; must be a positive int.

    Returns:
        The series approximation of sin(theta).

    Raises:
        ValueError: If theta is not a number or accuracy is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # Range-reduce theta into [0, 2*pi) so the truncated series converges fast.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with a truncated Maclaurin series.

    The obfuscated original was named ``__UpperCamelCase`` (colliding with the
    sine definition above) while ``__main__`` calls ``maclaurin_cos``, and its
    accuracy check was ``isinstance(accuracy, accuracy)``; both are fixed.

    Args:
        theta: Angle in radians; reduced modulo 2*pi before summing.
        accuracy: Number of series terms to sum; must be a positive int.

    Returns:
        The series approximation of cos(theta).

    Raises:
        ValueError: If theta is not a number or accuracy is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    # Range-reduce theta into [0, 2*pi) so the truncated series converges fast.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo: evaluate both series at a few sample angles, with the default
    # accuracy and with an explicit 15-term truncation.
    sin_samples = ((10, None), (-10, None), (10, 15), (-10, 15))
    for angle, terms in sin_samples:
        print(maclaurin_sin(angle) if terms is None else maclaurin_sin(angle, terms))

    cos_samples = ((5, None), (-5, None), (10, 15), (-10, 15))
    for angle, terms in cos_samples:
        print(maclaurin_cos(angle) if terms is None else maclaurin_cos(angle, terms))
| 276 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """AutoencoderKL model tests (fast, dummy-sized configs).

    NOTE(review): this class looks mechanically renamed. It lists the same
    placeholder base `__lowerCamelCase` twice (duplicate base class — a
    TypeError at class creation; the imports suggest the originals were
    ModelTesterMixin and UNetTesterMixin), the three class attributes all share
    the name `SCREAMING_SNAKE_CASE__` (so only the last survives), and every
    method is named `UpperCAmelCase_` (later defs shadow earlier ones). Call
    sites like `self.prepare_init_args_and_inputs_for_common()` and
    `self.dummy_input` indicate the original member names. Code is kept
    byte-identical; only documentation is added.
    """
    SCREAMING_SNAKE_CASE__ = AutoencoderKL
    SCREAMING_SNAKE_CASE__ = """sample"""
    SCREAMING_SNAKE_CASE__ = 1e-2
    @property
    def UpperCAmelCase_ (self ):
        # Dummy 4x3x32x32 input batch; the local names below are mangled —
        # the values are read back as batch_size / num_channels / sizes / image.
        # NOTE(review): `SCREAMING_SNAKE_CASE_` here was presumably `torch_device`.
        UpperCamelCase__ = 4
        UpperCamelCase__ = 3
        UpperCamelCase__ = (32, 32)
        UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
        return {"sample": image}
    @property
    def UpperCAmelCase_ (self ):
        # Expected input shape (channels, height, width).
        return (3, 32, 32)
    @property
    def UpperCAmelCase_ (self ):
        # Expected output shape (channels, height, width).
        return (3, 32, 32)
    def UpperCAmelCase_ (self ):
        # Presumably `prepare_init_args_and_inputs_for_common` (called below):
        # a tiny AutoencoderKL config plus matching dummy inputs.
        UpperCamelCase__ = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        UpperCamelCase__ = self.dummy_input
        return init_dict, inputs_dict
    def UpperCAmelCase_ (self ):
        # Intentionally empty (mixin test disabled for this model).
        pass
    def UpperCAmelCase_ (self ):
        # Intentionally empty (mixin test disabled for this model).
        pass
    @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
    def UpperCAmelCase_ (self ):
        # Gradient checkpointing must not change the loss or parameter gradients.
        # enable deterministic behavior for gradient checkpointing
        UpperCamelCase__ , UpperCamelCase__ = self.prepare_init_args_and_inputs_for_common()
        UpperCamelCase__ = self.model_class(**SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        assert not model.is_gradient_checkpointing and model.training
        UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        UpperCamelCase__ = torch.randn_like(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        UpperCamelCase__ = self.model_class(**SCREAMING_SNAKE_CASE_ )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(SCREAMING_SNAKE_CASE_ )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        UpperCamelCase__ = model_a(**SCREAMING_SNAKE_CASE_ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        UpperCamelCase__ = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        UpperCamelCase__ = dict(model.named_parameters() )
        UpperCamelCase__ = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def UpperCAmelCase_ (self ):
        # Loading a hub checkpoint must report no missing keys and produce output.
        UpperCamelCase__ , UpperCamelCase__ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=SCREAMING_SNAKE_CASE_ )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def UpperCAmelCase_ (self ):
        # Pretrained model output must match per-device pinned slices.
        UpperCamelCase__ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
        UpperCamelCase__ = model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        if torch_device == "mps":
            UpperCamelCase__ = torch.manual_seed(0 )
        else:
            UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        UpperCamelCase__ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCamelCase__ = image.to(SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
        UpperCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            UpperCamelCase__ = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            UpperCamelCase__ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            UpperCamelCase__ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )
@slow
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return F"gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy"
def UpperCAmelCase_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=(4, 3, 5_12, 5_12) , SCREAMING_SNAKE_CASE_=False ):
UpperCamelCase__ = torch.floataa if fpaa else torch.floataa
UpperCamelCase__ = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_=False ):
UpperCamelCase__ = """fp16""" if fpaa else None
UpperCamelCase__ = torch.floataa if fpaa else torch.floataa
UpperCamelCase__ = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder="""vae""" , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=0 ):
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
UpperCamelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
UpperCamelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
UpperCamelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCamelCase__ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.get_sd_vae_model()
UpperCamelCase__ = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCamelCase__ = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
UpperCamelCase__ = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCamelCase__ = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
@parameterized.expand(
    [
        # fmt: off
        [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
        [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
        # fmt: on
    ] )
@require_torch_gpu
def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
    """Half-precision variant of the decode test (looser tolerance).

    Fixes the original's duplicate parameter names (SyntaxError).
    NOTE(review): `fpaa` is this file's spelling of the fp16 flag; the original
    passed the seed where a boolean is expected — a boolean `True` is the only
    sensible value here.  TODO confirm against the helper's signature.
    """
    model = self.get_sd_vae_model(fpaa=True)
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)

    with torch.no_grad():
        sample = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]

    output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice)

    assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
    """fp16 decode must give (nearly) the same result with and without xformers
    memory-efficient attention.

    The original compared the single parameter to itself, which asserts nothing;
    the two decoded samples are now kept in separate variables and compared.
    """
    model = self.get_sd_vae_model(fpaa=True)
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)

    with torch.no_grad():
        sample = model.decode(latents).sample

    model.enable_xformers_memory_efficient_attention()
    with torch.no_grad():
        sample_xformers = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]
    assert torch_all_close(sample, sample_xformers, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
    """Full-precision counterpart of the xformers consistency check (tighter
    tolerance than fp16).

    The original compared the single parameter to itself, which asserts nothing;
    the two decoded samples are now kept in separate variables and compared.
    """
    model = self.get_sd_vae_model()
    latents = self.get_sd_image(seed, shape=(3, 4, 64, 64))

    with torch.no_grad():
        sample = model.decode(latents).sample

    model.enable_xformers_memory_efficient_attention()
    with torch.no_grad():
        sample_xformers = model.decode(latents).sample

    assert list(sample.shape) == [3, 3, 512, 512]
    assert torch_all_close(sample, sample_xformers, atol=1e-2)
@parameterized.expand(
    [
        # fmt: off
        [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
        [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
        # fmt: on
    ] )
def test_stable_diffusion_encode_sample(self, seed, expected_slice):
    """Encode an image, draw a posterior sample with a fixed generator, and
    compare a slice of the latent against reference values.

    Fixes the original's duplicate parameter names (SyntaxError).
    """
    model = self.get_sd_vae_model()
    image = self.get_sd_image(seed)
    generator = self.get_generator(seed)

    with torch.no_grad():
        dist = model.encode(image).latent_dist
        sample = dist.sample(generator=generator)

    # The latent has 4 channels and spatial dims downscaled by a factor of 8.
    assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

    output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
    expected_output_slice = torch.tensor(expected_slice)

    # mps numerics are slightly looser than CUDA/CPU.
    tolerance = 3e-3 if torch_device != "mps" else 1e-2
    assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 513 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __A(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.

    Implements the multistep (Adams-Bashforth style) solver of
    https://arxiv.org/pdf/2202.09778.pdf — formulas (9), (12), (13) and
    Algorithm 2.  Base classes fixed to the `SchedulerMixin, ConfigMixin` pair
    imported at the top of this file (the original referenced an undefined
    placeholder), and method names restored to the standard scheduler API so
    the multiple same-named methods no longer shadow each other.
    """

    # Solver order used by pipelines; the original (mangled) attribute name is
    # kept as an alias for backward compatibility.
    order = 1
    SCREAMING_SNAKE_CASE__ = 1

    @register_to_config
    def __init__(self, num_train_timesteps=1000, trained_betas=None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper:
        # https://arxiv.org/pdf/2202.09778.pdf — mainly formula (9), (12), (13)
        # and Algorithm 2.
        self.pndm_order = 4

        # running values: history of combined model outputs for the multistep formula
        self.ets = []

    def set_timesteps(self, num_inference_steps, device=None):
        """Precompute `betas`, `alphas` and `timesteps` for a run of
        `num_inference_steps` steps, and reset the multistep history."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            # Fixed: `torch.floataa` does not exist; `torch.float32` is the dtype.
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        # Fixed: the original called the nonexistent `torch.atana`; `torch.atan2`
        # maps each (beta, alpha) pair to an angle in [0, pi/2], rescaled to [0, 1].
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output, timestep, sample, return_dict=True):
        """Advance `sample` by one step using the stored model-output history.

        Returns a `SchedulerOutput` (or a 1-tuple when `return_dict=False`).

        Raises:
            ValueError: if `set_timesteps` has not been called.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep coefficients, falling back to lower orders while the
        # history is still being filled (orders 1..4).
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample, *args, **kwargs):
        """No-op: this scheduler does not rescale model inputs."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Invert the (alpha, sigma) mixing at `timestep_index` and re-mix at
        `prev_timestep_index` to obtain the previous sample."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # Guard against division by a vanishing alpha at the last step.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 513 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module logger.
lowerCamelCase_ = logging.get_logger(__name__)
# Map of pretrained M-CTC-T config archives.
# NOTE(review): this assignment reuses the name `lowerCamelCase_`, shadowing the
# logger bound just above — the two should have distinct names.  TODO confirm
# the intended original names.
lowerCamelCase_ = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __lowerCamelCase(PretrainedConfig):
    """Configuration class for an M-CTC-T (`mctct`) speech-recognition model.

    The original definition named every constructor parameter identically,
    which is a SyntaxError; the parameter names below are recovered from the
    attribute assignments in the body, and the base class is fixed to
    `PretrainedConfig`, which is imported at the top of this file.
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Return the Schur complement S = C - B.T @ A^{-1} @ B of [[A, B], [B.T, C]].

    `pseudo_inv` may supply a precomputed (pseudo-)inverse of A; otherwise A is
    inverted with `np.linalg.inv`.

    The original definition gave all four parameters the same name (a
    SyntaxError) and referenced the unbound name `pseudo_inv`; the names are
    recovered from the body, and the function is renamed to match its in-file
    call sites, with the old name kept as an alias.

    Raises:
        ValueError: on incompatible shapes, or when A is singular.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


# Backward-compatible alias for the original (mangled) name.
UpperCamelCase = schur_complement
class __lowerCamelCase(unittest.TestCase):
    """Tests for `schur_complement`: the determinant identity
    det([[A, B], [B.T, C]]) == det(A) * det(S), plus shape validation.

    The original gave all three test methods the same non-`test_` name, so they
    shadowed one another and were never collected; names are now distinct and
    discoverable, and the unbound `lowerCamelCase`/`det_*` references fixed.
    """

    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            # Swapped arguments produce mismatched shapes.
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            # B has 2 columns but C has 3.
            schur_complement(a, b, c)
if __name__ == "__main__":
    # Run module doctests first, then the unittest suite.
    import doctest

    doctest.testmod()
    unittest.main()
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for `BertJapaneseTokenizer` with word-level pre-tokenizers
    (MeCab / Sudachi / Jumanpp) and wordpiece sub-tokenization.

    The original class gave every method the same name `_a` (each def shadowed
    the previous one, so only the last survived and nothing was collected), and
    its base was an undefined placeholder; the base is fixed to
    `TokenizerTesterMixin` (imported at the top of this file) and methods get
    distinct, descriptive names.  Boolean keyword values were recovered from
    the expected outputs already present in the assertions.
    """

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        # The original dropped `self.` here, so `self.vocab_file` (used below
        # and in other tests) was never set.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_lower(self):
        # do_lower_case=True is grounded by the lowercased "iphone" expected below.
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_with_option(self):
        try:
            # NOTE(review): both flags were mangled to the same placeholder; the
            # expected output below keeps "iPhone" capitalised and preserves
            # \u3000, which implies do_lower_case=False, normalize_text=False.
            # TODO confirm against the original test.
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"], )

    def test_mecab_tokenizer_no_normalize(self):
        # normalize_text=False is grounded by the preserved wide space expected below.
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        # do_lower_case=True is grounded by the lowercased "iphone" expected below.
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        # normalize_text=False is grounded by the preserved "\u3000" expected below.
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        # trim_whitespace=True is grounded by the whitespace-free expected list below.
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        # do_lower_case=True is grounded by the lowercased "iphone" expected below.
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        # normalize_text=False is grounded by the un-normalised half-width
        # characters in the expected list below.
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        # trim_whitespace=True is grounded by the whitespace-free expected list below.
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"], )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        subword_tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(subword_tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        subword_tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(subword_tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for `BertJapaneseTokenizer` with character-level sub-tokenization.

    Same repairs as the word-level class above: distinct method names instead of
    the shadowing `_a`, `TokenizerTesterMixin` as the base, and `self.vocab_file`
    actually assigned in `setUp`.
    """

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """`AutoTokenizer` must resolve a Japanese BERT checkpoint to
    `BertJapaneseTokenizer`.

    Fixes the original's unbound `lowerCamelCase__` references and gives the
    class a distinct name (all four test classes in this file shared one name,
    so only the last was importable/collectable).
    """

    def test_tokenizer_bert_japanese(self):
        checkpoint = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """Loading a checkpoint with a mismatched tokenizer class must log a warning.

    Fixes the original's unbound `lowerCamelCase__` references and gives the
    class a distinct name (see note on `AutoTokenizerCustomTest`).
    """

    def test_tokenizer_mismatch_warning(self):
        # Japanese checkpoint loaded through the plain BertTokenizer ...
        checkpoint = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(checkpoint)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
        # ... and an English checkpoint loaded through BertJapaneseTokenizer.
        checkpoint = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(checkpoint)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
def print_distance(distance, src):
    """Print the shortest distance from vertex `src` to every vertex.

    The original gave both parameters the same name (a SyntaxError) and was
    defined under a name shadowed by the other functions in this file; the name
    is recovered from the call site at the bottom of the file.
    """
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph, distance, edge_count):
    """Return True if any edge can still be relaxed after Bellman-Ford has run,
    i.e. the graph contains a reachable negative-weight cycle.

    The original gave all three parameters one name (a SyntaxError); names are
    recovered from the body and the call site in `bellman_ford`.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph, vertex_count, edge_count, src):
    """Single-source shortest paths supporting negative edge weights.

    Edges are dicts with keys "src", "dst", "weight".  Returns the list of
    distances from `src` to every vertex (float("inf") for unreachable ones).

    The original gave all four parameters one name (a SyntaxError); names are
    recovered from the body and the call site in the main guard.

    Raises:
        Exception: if a negative-weight cycle is reachable ("Negative cycle found").
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    # Interactive driver: read a graph from stdin and print shortest distances.
    # The original bound every value to the single name `a` and then referenced
    # `E`, `graph`, `src`, etc., which were unbound; names are restored from
    # those references.
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch weights file + config.

    Builds a GPT-2 config (the default architecture when `gpta_config_file` is
    an empty string), loads the TF weights into a fresh model, then writes
    `WEIGHTS_NAME` and `CONFIG_NAME` under `pytorch_dump_folder_path`.

    The original used one name for the function and all three parameters (a
    SyntaxError) and referenced unbound names; the function name matches its
    call site in the main guard below.
    """
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point.  The original assigned the parser to one mangled name
    # but called `parser.add_argument` / accessed `args.*`, both unbound; the
    # attribute accesses are aligned with the declared `--gpt2_*` flags.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 481 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# Dump environment information: Python / transformers versions, then torch,
# deepspeed and tensorflow details when each library is importable.
# NOTE(review): `import os` at the top of this file is otherwise unused and
# this "3" is assigned to a throwaway name — the original line most likely set
# an environment variable (e.g. silencing TF logging via os.environ).
# TODO confirm against the original script.
_lowerCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
    import torch
    print("""Torch version:""", torch.__version__)
    print("""Cuda available:""", torch.cuda.is_available())
    print("""Cuda version:""", torch.version.cuda)
    print("""CuDNN version:""", torch.backends.cudnn.version())
    print("""Number of GPUs available:""", torch.cuda.device_count())
    print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
    # torch not installed: record its absence instead of failing.
    print("""Torch version:""", None)
try:
    import deepspeed
    print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
    print("""DeepSpeed version:""", None)
try:
    import tensorflow as tf
    print("""TensorFlow version:""", tf.__version__)
    print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
    print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
    print("""TensorFlow version:""", None)
| 481 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
# Fix: every constant below had been mangled to the single, repeatedly rebound
# name ``__UpperCAmelCase``, leaving all later references (README_CORRECT,
# CORRECT_DICT, ...) undefined. Names are restored from the identifiers the
# parametrize decorators below reference. The YAML literal's nesting
# indentation (lost in mangling) is also restored so yaml.safe_load produces
# the intended tree.

# Section schema used as the second argument to ReadMe.from_string/from_readme.
example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X" # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)

# Expected ReadMe.to_dict() output for README_CORRECT.
CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

# A fully valid README.
README_CORRECT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

# Valid README with an extra fourth-level subsection (ignored by the schema).
README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

# Expected ReadMe.to_dict() output for README_CORRECT_FOUR_LEVEL.
CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

# YAML front-matter markers present but empty.
README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
    """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)

# No YAML front matter at all.
README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
    """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)

# Only the opening YAML marker.
README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_INCORRECT_YAML = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""

# `Dataset Summary` section present but without any text.
README_MISSING_TEXT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_TEXT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""

# First-level heading only, with no subsections at all.
README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
EXPECTED_ERROR_README_NONE_SUBSECTION = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""

# Missing the `Supported Tasks and Leaderboards` subsection.
README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_SUBSECTION = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""

# `Languages` section present but empty.
README_MISSING_CONTENT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
EXPECTED_ERROR_README_MISSING_CONTENT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""

# No first-level heading at all.
README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""

# Two distinct first-level headings.
README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""

# First-level heading does not start with `Dataset Card for`.
README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""

# Completely empty README.
README_EMPTY = """"""
EXPECTED_ERROR_README_EMPTY = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""

# The same first-level heading repeated — a parsing (not validation) error.
README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
assert ReadMe.from_string(lowerCamelCase_ , lowerCamelCase_ ).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def __A(readme_md, expected_error):
    """Validating a malformed README raises ValueError with the expected message.

    Fix: duplicate parameter names (SyntaxError) and ``pytest.raises`` being
    handed a test parameter instead of the exception class ``ValueError``.
    """
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def __A(readme_md, expected_error):
    """A structural parsing error raises ValueError already in ``from_string``.

    Fix: duplicate parameter names (SyntaxError) and ``pytest.raises`` being
    handed a test parameter instead of the exception class ``ValueError``.
    """
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def __A(readme_md):
    """With ``suppress_parsing_errors=True`` a broken README must not raise.

    Fix: the parameter name must match the parametrize id for pytest to bind
    it, and the keyword must be the literal ``True`` (it had been mangled to
    the undefined placeholder name).
    """
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def __A(readme_md, expected_dict):
    """Round-trip through an on-disk README.md: ``from_readme`` parses the file.

    Fix: duplicate parameter names (SyntaxError); locals restored so the body
    is runnable (``path``/``out`` had been collapsed onto mangled names).
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        # The root node is named after the file path and carries no text itself.
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def __A(readme_md, expected_error):
    """Validating a malformed on-disk README raises ValueError mentioning its path.

    Fix: duplicate parameter names (SyntaxError) and ``pytest.raises`` being
    handed a test parameter instead of the exception class ``ValueError``.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def __A(readme_md, expected_error):
    """A structural parsing error raises ValueError already in ``from_readme``.

    Fix: duplicate parameter names (SyntaxError) and ``pytest.raises`` being
    handed a test parameter instead of the exception class ``ValueError``.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def __A(readme_md):
    """With ``suppress_parsing_errors=True`` a broken on-disk README must not raise.

    Fix: the keyword had been mangled to an undefined placeholder; it must be
    the literal ``True``.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 379 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: ``*lowerCamelCase_`` together with ``**lowerCamelCase_`` is a
    duplicate-argument SyntaxError; parameter annotations referenced unimported
    ``typing`` names (evaluated at def time -> NameError); ``metaclass=lowercase_``
    was undefined while ``DummyObject`` is what this module imports.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): this classmethod shares its mangled name with the next
        # one and is therefore shadowed; the original distinct names were lost.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class UpperCamelCase__(metaclass=DummyObject):
    """Import-time placeholder used when the `flax` backend is missing; any use raises.

    Fixes: duplicate ``*lowerCamelCase_``/``**lowerCamelCase_`` argument name
    (SyntaxError), undefined ``typing`` annotations, and the undefined
    ``lowercase_`` metaclass replaced by the imported ``DummyObject``.
    """

    # Backend(s) whose absence this dummy reports.
    SCREAMING_SNAKE_CASE__ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        # NOTE(review): shadowed by the identically named classmethod below.
        requires_backends(cls, ["flax"])

    @classmethod
    def lowerCamelCase_(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 379 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
# Module-level logger (name follows the module path), used by transformers utilities.
_lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__(DeiTImageProcessor):
    """Deprecated alias for DeiTImageProcessor kept for backward compatibility.

    Fixes: ``*A__`` together with ``**A__`` is a duplicate-argument SyntaxError;
    the undefined ``A__`` passed as the second argument of ``warnings.warn`` is
    the warning category, which for a deprecation shim is ``FutureWarning``;
    the base class ``snake_case__`` was undefined while ``DeiTImageProcessor``
    is what this module imports (and what the message tells users to migrate to).
    """

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice, then defer everything to the real processor.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make torch/cuda ops deterministic so the image-slice comparisons below are stable.
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
    """Fast tests for StableDiffusionUpscalePipeline built from tiny dummy components.

    NOTE(review): symbol mangling collapsed every method onto ``snake_case_`` (so
    only the last definition of that name survives at runtime) and replaced many
    argument values with the undefined placeholder ``A__``. The ``self.dummy_*``
    reads below indicate the ``@property`` helpers were once ``dummy_image``,
    ``dummy_cond_unet_upscale``, ``dummy_vae`` and ``dummy_text_encoder``, and the
    local names (``batch_size``, ``sd_pipe``, ``image`` ...) referenced after each
    mangled assignment are the pre-mangling variable names — confirm against the
    original test module before relying on this file.
    """
    def snake_case_ ( self ):
        """Presumably ``tearDown``: reclaim CPU/GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def snake_case_ ( self ):
        """Small random image tensor (batch=1, 3 channels, 32x32); presumably ``dummy_image``."""
        UpperCAmelCase_: str = 1
        UpperCAmelCase_: Optional[Any] = 3
        UpperCAmelCase_: Optional[int] = (32, 32)
        # As written ``batch_size``/``num_channels``/``sizes``/``image`` and ``A__``
        # are undefined — the assignments above were mangled away from those names.
        UpperCAmelCase_: int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A__ )
        return image
    @property
    def snake_case_ ( self ):
        """Tiny conditional UNet; presumably ``dummy_cond_unet_upscale``."""
        torch.manual_seed(0 )
        UpperCAmelCase_: str = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
    @property
    def snake_case_ ( self ):
        """Tiny VAE; presumably ``dummy_vae``."""
        torch.manual_seed(0 )
        UpperCAmelCase_: int = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
    @property
    def snake_case_ ( self ):
        """Tiny CLIP text encoder; presumably ``dummy_text_encoder``."""
        torch.manual_seed(0 )
        UpperCAmelCase_: List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(A__ )
    def snake_case_ ( self ):
        """End-to-end dummy run: checks output size (4x upscale) and a pixel slice."""
        UpperCAmelCase_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
        UpperCAmelCase_: Dict = DDPMScheduler()
        UpperCAmelCase_: Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_: Union[str, Any] = self.dummy_vae
        UpperCAmelCase_: Optional[int] = self.dummy_text_encoder
        UpperCAmelCase_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_: Dict = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_: Optional[int] = StableDiffusionUpscalePipeline(
            unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
        UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        UpperCAmelCase_: Union[str, Any] = "A painting of a squirrel eating a burger"
        UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
        UpperCAmelCase_: int = sd_pipe(
            [prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_: Optional[Any] = output.images
        UpperCAmelCase_: List[str] = torch.Generator(device=A__ ).manual_seed(0 )
        # Same call without return_dict: tuple output must match the dataclass output.
        UpperCAmelCase_: List[Any] = sd_pipe(
            [prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=A__ , )[0]
        UpperCAmelCase_: List[Any] = image[0, -3:, -3:, -1]
        UpperCAmelCase_: List[str] = image_from_tuple[0, -3:, -3:, -1]
        # The upscaler multiplies the low-res size by 4.
        UpperCAmelCase_: int = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        UpperCAmelCase_: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def snake_case_ ( self ):
        """Batch handling: two prompts, and num_images_per_prompt=2, both give 2 images."""
        UpperCAmelCase_: Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
        UpperCAmelCase_: Union[str, Any] = DDPMScheduler()
        UpperCAmelCase_: Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_: Dict = self.dummy_vae
        UpperCAmelCase_: Any = self.dummy_text_encoder
        UpperCAmelCase_: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_: Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_: List[str] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_: str = StableDiffusionUpscalePipeline(
            unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
        UpperCAmelCase_: int = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
        UpperCAmelCase_: Union[str, Any] = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_: Any = output.images
        assert image.shape[0] == 2
        UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
        UpperCAmelCase_: Any = sd_pipe(
            [prompt] , image=A__ , generator=A__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_: Dict = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def snake_case_ ( self ):
        """fp16 smoke test on GPU: unet/text encoder in half precision, vae kept fp32."""
        UpperCAmelCase_: List[str] = self.dummy_cond_unet_upscale
        UpperCAmelCase_: Dict = DDPMScheduler()
        UpperCAmelCase_: int = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_: Dict = self.dummy_vae
        UpperCAmelCase_: Dict = self.dummy_text_encoder
        UpperCAmelCase_: Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_: List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_: Union[str, Any] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        UpperCAmelCase_: List[str] = unet.half()
        UpperCAmelCase_: Union[str, Any] = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline(
            unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
        UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
        UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
        UpperCAmelCase_: str = sd_pipe(
            [prompt] , image=A__ , generator=A__ , num_inference_steps=2 , output_type="np" , ).images
        UpperCAmelCase_: str = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow integration tests for the real stabilityai/stable-diffusion-x4-upscaler checkpoint.

    NOTE(review): as in the class above, all method names were mangled onto
    ``snake_case_`` (only the last survives at runtime) and many values onto the
    undefined placeholder ``A__``; locals referenced after each mangled
    assignment (``pipe``, ``output``, ``image`` ...) are the pre-mangling names.
    """
    def snake_case_ ( self ):
        """Presumably ``tearDown``: reclaim CPU/GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case_ ( self ):
        """Full-precision run against a reference upscaled image from the hub."""
        UpperCAmelCase_: str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_: List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        UpperCAmelCase_: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(A__ )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        UpperCAmelCase_: List[str] = "a cat sitting on a park bench"
        UpperCAmelCase_: Any = torch.manual_seed(0 )
        UpperCAmelCase_: Any = pipe(
            prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
        UpperCAmelCase_: Dict = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3
    def snake_case_ ( self ):
        """fp16 run against the fp16 reference image (looser tolerance)."""
        UpperCAmelCase_: Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_: Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        UpperCAmelCase_: Optional[int] = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_: Any = StableDiffusionUpscalePipeline.from_pretrained(
            A__ , torch_dtype=torch.floataa , )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        UpperCAmelCase_: Any = "a cat sitting on a park bench"
        UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase_: Optional[Any] = pipe(
            prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
        UpperCAmelCase_: str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def snake_case_ ( self ):
        """Memory test: attention slicing + sequential CPU offload must stay under ~2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase_: List[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_: Tuple = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_: Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
            A__ , torch_dtype=torch.floataa , )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase_: str = "a cat sitting on a park bench"
        UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase_: Union[str, Any] = pipe(
            prompt=A__ , image=A__ , generator=A__ , num_inference_steps=5 , output_type="np" , )
        UpperCAmelCase_: Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
# Module-level RNG used when no explicit rng is supplied (body below reads `global_rng`).
global_rng = random.Random()


def UpperCAmelCase__(shape, scale=1.0, rng=None, name=None):
    """Create a shape[0] x shape[1] nested list of random floats in [0, scale).

    Fix: the obfuscated signature repeated the same parameter name four times
    (a SyntaxError) while the body read `shape`, `scale` and `rng`; the module
    RNG was also bound to a name the body never used.

    Args:
        shape: (num_rows, num_cols) pair.
        scale: multiplier applied to each uniform sample.
        rng:   optional ``random.Random``; defaults to the module-level one.
        name:  unused, kept for signature compatibility.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


# Backward-compatible alias: the test classes below call this helper as `floats_list`.
floats_list = UpperCAmelCase__
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor kwargs and dummy speech inputs for the tests below.

    Fix: the obfuscated ``__init__`` repeated the parameter name ``lowerCamelCase__``
    (a SyntaxError), none of the attributes the methods read were ever assigned, and
    both methods were named ``_A`` while the test class calls
    ``prepare_feat_extract_dict()``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Kwargs used to instantiate the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Dummy batch of speech inputs; lengths increase unless ``equal_length``."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


# Backward-compatible alias for the original (obfuscated) class name.
__magic_name__ = SpeechaTextFeatureExtractionTester
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for the Speech2Text feature extractor (filterbank features + CMVN).

    Fix: the obfuscated class used the undefined base ``_snake_case`` (the file
    imports ``SequenceFeatureExtractionTestMixin``), named every method ``_A`` so
    no test was discoverable and internal calls like
    ``self._check_zero_mean_unit_variance`` failed, and bound every result to a
    throwaway local while later lines read ``feature_extractor``, ``inputs`` etc.
    """

    # Attribute read by the mixin; None when torchaudio/speech deps are missing.
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # After CMVN every feature dimension should have ~0 mean and ~unit variance.
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            # padded tail must stay (near) zero
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # NOTE(review): the obfuscated source collapsed both dtypes to `floataa`;
        # float64 in / float32 out matches the extractor's documented padding behavior.
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # NOTE(review): the source sliced [:30] against 24 expected values, which cannot
        # broadcast; slicing to the length of `expected` — confirm against upstream.
        self.assertTrue(np.allclose(input_features[0, 0, : len(expected)], expected, atol=1e-4))


# Backward-compatible alias for the original (obfuscated) class name.
__magic_name__ = SpeechaTextFeatureExtractionTest
| 348 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus the audio geometry each
# model was trained with. Fix: the source bound this table to `a`, but every
# consumer (download(), main()) reads it as MODELS_MAP.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}

# Preserve the original module-level binding.
a = MODELS_MAP
def __magic_name__(alpha, sigma):
    """Convert (alpha, sigma) noise-schedule coordinates to a timestep in [0, 1].

    Fix: the obfuscated signature repeated one parameter name (SyntaxError) and
    called the nonexistent ``torch.atana`` — ``torch.atan2`` is the intended op.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2


# Alias matching the name used by get_crash_schedule().
alpha_sigma_to_t = __magic_name__
def __magic_name__(t):
    """Map linear timesteps ``t`` in [0, 1] onto the "crash" noise schedule.

    Fix: the obfuscated body bound both intermediates to a throwaway local while
    the next lines read ``sigma`` (NameError).
    """
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


# Alias matching the name used in main().
get_crash_schedule = __magic_name__
class __a(object):
    """Plain attribute container used as a fake config object in main().

    Fix: the original base ``_snake_case`` is undefined in this file; a plain
    ``object`` base matches how main() uses it (bare attribute assignment).
    """
    pass


# Alias matching the name used in main().
Object = __a
class __a(nn.Module):
    """Wraps the original audio-diffusion UNet so its checkpoint can be loaded.

    Fix: the obfuscated ``__init__`` bound each module to a throwaway local, so
    ``self.diffusion`` (read on the next line) and ``diffusion_ema`` (read by
    main()) were never set.
    """

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        # NOTE(review): the source passed the config object as `scramble`; a boolean
        # (scramble=True) is what SobolEngine expects — confirm against upstream.
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


# Alias matching the name used in main().
DiffusionUncond = __a
def __magic_name__(model_name):
    """Download an official checkpoint into the CWD and return its local path.

    Fix: the obfuscated body bound the URL to a throwaway local while the wget
    command read ``url`` (NameError).
    """
    url = MODELS_MAP[model_name]["url"]
    # Shells out to wget; the url comes from the trusted MODELS_MAP table above.
    os.system(f"""wget {url} ./""")
    return f"""./{model_name}.ckpt"""


# Alias matching the name used in main().
download = __magic_name__
# Original-layer-index -> diffusers sub-module name tables used by rename() below.
# Fix: the source bound every table to `a` (each rebinding shadowing the last),
# while rename()/convert_*_naming() read them by these names.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}

# Preserve the original module-level binding (`a` ended up pointing at the last map).
a = ATTN_MAP
def __magic_name__(name):
    """Map a ResConvBlock parameter name to its diffusers equivalent.

    Fix: the obfuscated parameter was named ``__UpperCAmelCase`` while the body
    read ``name`` (NameError on every call).
    """
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"""ResConvBlock error with {name}""")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


# Alias matching the name used in rename().
convert_resconv_naming = __magic_name__
def __magic_name__(name):
    """Map an attention-block parameter name via ATTN_MAP.

    qkv entries fan out into a list of names (query/key/value). Fix: the
    obfuscation replaced both the parameter and the loop key with the same
    placeholder, so the prefix test and replacements were self-referential.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"""Attn error with {name}""")


# Alias matching the name used in rename().
convert_attn_naming = __magic_name__
def __magic_name__(input_string, max_depth=13):
    """Translate one original checkpoint key into diffusers UNet naming.

    Strips the ``net.``/``main.7.`` nesting to compute the block depth, maps the
    leading layer index through the depth-appropriate table, and delegates the
    remainder to the resconv/attention converters. Fix: the obfuscated body bound
    every intermediate to a throwaway local while later lines read ``string``,
    ``depth``, ``layer_num`` etc. (NameError).
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]

    # mid block: two-digit layer indices only occur at the deepest level
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""")
    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


# Alias matching the name used in rename_orig_weights().
rename = __magic_name__
def __magic_name__(state_dict):
    """Rename every key of an original checkpoint state dict to diffusers naming.

    Fix: the obfuscated body never bound ``new_state_dict`` entries or the renamed
    key, so the returned dict was always empty.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


# Alias matching the name used in main().
rename_orig_weights = __magic_name__
def __magic_name__(new_state_dict, keys, v):
    """Split a fused Conv qkv tensor into Linear-style query/key/value entries.

    A single key stores the 1D-conv weight squeezed to 2D (or the bias as-is);
    three keys split ``v`` into equal thirds along dim 0. Fix: the obfuscated body
    assigned every slice to a throwaway local, so ``new_state_dict`` was returned
    unmodified.
    """
    if len(keys) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[keys[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[keys[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[keys[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[keys[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


# Alias matching the name used in rename_orig_weights().
transform_conv_attns = __magic_name__
def __magic_name__(args):
    """Convert an original dance-diffusion checkpoint to a diffusers pipeline and
    verify the two produce (near-)identical audio.

    Fix: the obfuscated body bound every value to a throwaway local while later
    lines read ``device``, ``model_name``, ``config``, ``pipe`` etc., making the
    script a cascade of NameErrors; names restored from the visible usage.
    """
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")

    model_name = args.model_path.split("""/""")[-1].split(""".""")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["""sample_rate"""]
    sample_size = MODELS_MAP[model_name]["""sample_size"""]

    # fake config object carrying the audio geometry
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["""state_dict"""])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith("""kernel""") for k in list(diffusers_minus_renamed)), f"""Problem with {diffusers_minus_renamed}"""

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    # sample with the original model for comparison
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("""Diff sum""", diff_sum)
    print("""Diff max""", diff_max)

    assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""

    print(f"""Conversion for {model_name} successful!""")


# Alias matching the name used in the __main__ guard.
main = __magic_name__
if __name__ == "__main__":
    # Fix: the obfuscated guard bound the parser and the parsed namespace to `a`
    # while the following lines read `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
| 109 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds a small TimesformerConfig plus dummy video inputs for the tests below.

    Fix: the obfuscated ``__init__`` repeated the parameter name ``UpperCAmelCase_``
    (a SyntaxError), no attribute was ever assigned, and the caller instantiates
    this class as ``TimesformerModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Backward-compatible alias for the original (obfuscated) class name.
__a = TimesformerModelTester
@require_torch
class __a ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase_ : Tuple = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : int = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : str = False
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCamelCase = TimesformerModelTester(self )
UpperCamelCase = ConfigTester(
self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=False )-> Any:
"""simple docstring"""
UpperCamelCase = copy.deepcopy(__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Any:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : str )-> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Tuple:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(__UpperCamelCase )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__UpperCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> int:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TimesformerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : int )-> Dict:
"""simple docstring"""
if not self.has_attentions:
pass
else:
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
UpperCamelCase = self.model_tester.seq_length
UpperCamelCase = self.model_tester.num_frames
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
UpperCamelCase = len(__UpperCamelCase )
# Check attention is always last and order is fine
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(__UpperCamelCase ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Any:
    """Check that hidden states are returned with the expected count and shape."""

    def check_hidden_states_output(inputs_dict, config, model_class):
        # Build the model, run one forward pass and inspect `hidden_states`.
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        # embeddings output + one per transformer layer
        expected_num_layers = self.model_tester.num_hidden_layers + 1
        self.assertEqual(len(hidden_states), expected_num_layers)
        seq_length = self.model_tester.seq_length
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
        )

    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        check_hidden_states_output(inputs_dict, config, model_class)
        # check that output_hidden_states also works using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video()-> list:
    """Download the sample spaghetti video and return it as a list of frames.

    Named ``prepare_video`` because that is how the integration test below
    calls it; the obfuscated name broke the reference.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class __a ( unittest.TestCase ):
    """Integration test: Timesformer video classification on a real checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Timesformer shares VideoMAE's preprocessing; None when vision deps absent.
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        """Run a forward pass on 8 frames and verify logits shape and values."""
        model = TimesformerForVideoClassification.from_pretrained(
            "facebook/timesformer-base-finetuned-k400"
        ).to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 719 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of *arr* as tuples, using Heap's algorithm.

    The original "swap" lines assigned both values to throwaway locals instead
    of exchanging the list elements, so no permutation was ever generated; the
    ``__main__`` guard also called the function by this name.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        # Heap's algorithm: permute the first k elements in place.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap i-th with last
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap first with last
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Module-level logger; must be named `logger` — the functions below read it.
logger = logging.getLogger(__name__)
def main():
    """Tokenize a raw text file once (tokenization + token_to_ids) and pickle
    the resulting id sequences so later runs can skip this preprocessing.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    iter_count = 0  # renamed: original shadowed the `iter` builtin
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter_count += 1
        if iter_count % interval == 0:
            end = time.time()
            logger.info(f"{iter_count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Store ids compactly: uint16 when the vocab fits in 16 bits, int32 otherwise.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase__ ( AbstractDatasetReader ):
    """Dataset reader that builds a :class:`Dataset` from a Spark DataFrame.

    The original class inherited from itself (a NameError at class-creation
    time) and declared every ``__init__`` parameter with the same name (a
    SyntaxError); the intended base and parameter names are restored from the
    ``AbstractDatasetReader`` contract and the attribute reads below.
    """

    def __init__(
        self,
        df,
        split = None,
        features = None,
        streaming = True,
        cache_dir = None,
        keep_in_memory = False,
        working_dir = None,
        load_from_cache_file = True,
        file_format = "arrow",
        **kwargs,
    ) -> None:
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        """Return the dataset (streaming or fully prepared, per ``streaming``)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions: a 4x3 Dataset with the expected column dtypes.

    (Duplicate parameter names in the original were a SyntaxError; the helper
    is called as ``_check_parquet_dataset`` throughout this file.)
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a parquet file honours keep_in_memory (arrow memory tracked)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit Features mapping overrides the dtypes inferred from parquet."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The reader tags the dataset with the requested split ("train" by default)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # Original `a == b if c else "train"` parenthesized as `(a == b) if c else "train"`,
    # which made the assert vacuous for split=None; parenthesize the intent.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts either a single path or a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions: each requested split is a 4x3 dataset with expected dtypes."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """A {split: path} mapping yields a DatasetDict, honouring keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """An explicit Features mapping overrides inferred dtypes in DatasetDict mode."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Each key of the path mapping becomes a correspondingly-named split."""
    if split:
        path = {split: parquet_path}
    else:
        # No split requested: default to train/test over the same file.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a Dataset to parquet round-trips the underlying arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a parquet round-trip (eager and streaming reads)."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Row-group size is tuned down for media features, default otherwise."""
    assert get_writer_batch_size(feature) == expected
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# Module-level logger; read as `logger` by the conversion helpers below.
logger = logging.get_logger(__name__)

# fairseq -> transformers parameter-name mapping; "*" is a layer-index wildcard.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model rather than inside the encoder.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign *value* into *hf_pointer* at the dotted path *key*, shape-checked.

    ``weight_type`` selects which tensor slot (weight / weight_g / weight_v /
    bias) to fill; ``None`` assigns the module's own ``.data``.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy fairseq wav2vec2 encoder weights into *hf_model*.

    Returns the fairseq projection module when the encoder and decoder have
    different dimensions (``proj``), otherwise ``None``; unmatched weights are
    logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-layer tensor into the HF feature extractor.

    ``type_id`` 0 is the conv itself; ``type_id`` 2 is the layer norm (only the
    first layer when group norm is used).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight data."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share (not copy) the tensor so the embedding and projection stay tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id dict from a fairseq dict file (one "word count" per line).

    Special tokens occupy ids 0-3; file tokens are numbered from 4 in order.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2 seq2seq checkpoint into a HF
    ``SpeechEncoderDecoderModel`` and save model/tokenizer/feature extractor
    under *pytorch_dump_folder_path*.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths/config and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    The original bound the tuple ``(r + n % 10, n // 10)`` to a single local,
    never updating ``r`` or ``n``; the guard also called it as ``solution``.
    """
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
def lowercase_(hex_num: str):
    """Convert a hexadecimal string to an int whose decimal digits spell its
    binary form (e.g. "AC" -> 10101100).

    Raises ValueError for empty or non-hexadecimal input.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    # Fix: the original crashed on "0" — the loop left bin_str empty and
    # int("") raised ValueError.
    if not bin_str:
        bin_str = "0"
    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test with the 6k±1 optimization.

    Renamed from the shadowed obfuscated name: the function below calls
    ``is_prime``, which was otherwise undefined.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the spiral side length at which the fraction of primes on the
    diagonals first drops below *ratio* (Project Euler 58).
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # The four corners of the next ring are j*j+j+1, stepping by j+1.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import os
import string
import sys
# Bit flagged onto arrow-key codes so they don't collide with plain chars.
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow codes are contiguous: "up".."left" bound the range.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of translated keystrokes; Windows delivers arrows as 2-byte pairs.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Return one raw keypress from stdin, unbuffered (Windows or POSIX)."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver each keypress immediately, no echo/line buffering.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def lowercase():
    """Read one keypress and translate escape sequences into flagged key codes.

    Returns the pressed character as a ``str`` for printable keys and arrows
    (arrow keys come back with ``ARROW_KEY_FLAG`` OR-ed in), or
    ``KEYMAP["undefined"]`` (an int) for anything unrecognised.

    NOTE(review): this module defines two functions named ``lowercase`` (this
    one shadows the raw reader above) yet calls ``get_raw_chars`` — confirm
    the intended function names against the original module.
    """
    char = get_raw_chars()
    # Interrupt (Ctrl-C) and newline pass straight through.
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                # Re-apply the flag so callers can compare against KEYMAP["up"], etc.
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 529 | 0 |
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients ``poly`` at ``x``.

    ``poly[i]`` is the coefficient of ``x**i`` (lowest degree first).
    """
    return sum(coeff * (x**power) for power, coeff in enumerate(poly))
def _SCREAMING_SNAKE_CASE(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial ``poly`` (lowest degree first) at ``x`` using
    Horner's scheme — O(n) multiplications, no exponentiation."""
    result = 0.0
    # Fold from the highest-degree coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Demo: evaluate 5x^2 + 9.3x^3 + 7x^4 at x = 10 with both implementations.
    # NOTE(review): `Union`, `evaluate_poly`, `horner`, `poly` and `x` are all
    # unbound here (both functions above are named `_SCREAMING_SNAKE_CASE`),
    # so this block raises NameError as written — confirm the intended names.
    lowerCamelCase : Union[str, Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
    lowerCamelCase : Union[str, Any] = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 70 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A( UpperCamelCase , unittest.TestCase ):
    '''Unit tests for TransfoXLTokenizer: vocab handling, casing, moses-style
    punctuation splitting and added-token bookkeeping.

    NOTE(review): the first base class is the name `UpperCamelCase`, which the
    class attributes below also rebind — presumably this was a tokenizer-test
    mixin originally; confirm the intended base class. Most locals in the
    methods are rebound to `lowerCamelCase_` while later lines read other
    names (e.g. `vocab_tokens`, `tokenizer`), so several methods are broken
    as written.
    '''
    # Tokenizer class under test plus two mixin feature switches (all three
    # attribute names are scrambled to `UpperCamelCase`; the last wins).
    UpperCamelCase = TransfoXLTokenizer
    UpperCamelCase = False
    UpperCamelCase = False
    def a__ ( self : Optional[Any] ) -> int:
        """Write a tiny vocabulary file into the mixin's temp dir."""
        super().setUp()
        lowerCamelCase_ = [
            '<unk>',
            '[CLS]',
            '[SEP]',
            'want',
            'unwanted',
            'wa',
            'un',
            'running',
            ',',
            'low',
            'l',
        ]
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def a__ ( self : Optional[Any] , **A_ : Tuple ) -> Any:
        """Instantiate a tokenizer from the temp vocab (forcing lower_case on)."""
        lowerCamelCase_ = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A_ )
    def a__ ( self : List[str] , A_ : Dict ) -> Any:
        """Return an (input_text, expected_output) pair for round-trip tests."""
        lowerCamelCase_ = '<unk> UNwanted , running'
        lowerCamelCase_ = '<unk> unwanted, running'
        return input_text, output_text
    def a__ ( self : List[Any] ) -> Union[str, Any]:
        """Lower-cased tokenization and token->id conversion on the toy vocab."""
        lowerCamelCase_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A_ )
        lowerCamelCase_ = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(A_ , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [0, 4, 8, 7] )
    def a__ ( self : Any ) -> str:
        """Whitespace normalisation with lower-casing enabled."""
        lowerCamelCase_ = TransfoXLTokenizer(lower_case=A_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
    def a__ ( self : int ) -> Dict:
        """Whitespace normalisation with original casing preserved."""
        lowerCamelCase_ = TransfoXLTokenizer(lower_case=A_ )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def a__ ( self : List[Any] ) -> Tuple:
        """Moses-style splitting of punctuation, numbers and hyphens
        (@-@ / @,@ / @.@ markers) and its exact inversion by
        convert_tokens_to_string."""
        lowerCamelCase_ = TransfoXLTokenizer(lower_case=A_ )
        lowerCamelCase_ = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        lowerCamelCase_ = [
            'Hello',
            '(',
            'bracket',
            ')',
            'and',
            'side',
            '@-@',
            'scrolled',
            '[',
            'and',
            ']',
            'Henry',
            '\'s',
            '$',
            '5',
            '@,@',
            '000',
            'with',
            '3',
            '@.@',
            '34',
            'm',
            '.',
            'What',
            '\'s',
            'up',
            '!',
            '?',
        ]
        self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
        self.assertEqual(tokenizer.convert_tokens_to_string(A_ ) , A_ )
    def a__ ( self : Optional[int] ) -> Union[str, Any]:
        """add_tokens + move_added_token: moved token must keep a single id."""
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = len(A_ )
        tokenizer.add_tokens(['new1', 'new2'] )
        tokenizer.move_added_token('new1' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(A_ ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 70 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 702 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __lowercase():
    """Parse command-line options for the image-generation script.

    Returns an ``argparse.Namespace`` with ``pretrained_model_name_or_path``
    (str, required), ``caption`` (str), ``images_num`` (int), ``seed`` (int)
    and ``cuda_id`` (int).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def __lowercase(imgs, rows, cols):
    """Tile ``rows * cols`` equally sized PIL images into one grid image.

    Images are pasted left-to-right, top-to-bottom. Raises ValueError when the
    number of images does not match ``rows * cols``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    # All images are assumed to share the size of the first one — TODO confirm.
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def __lowercase(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the Stable Diffusion ``pipeline`` on ``prompt``.

    Returns ``(grid, images)`` — the single tiled grid image and the list of
    individual generated images.
    """
    # Seed a generator on the pipeline's device for reproducible sampling.
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    # NOTE(review): assumes num_images_per_prompt fills the grid exactly
    # (e.g. a perfect square) — confirm with callers.
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# ---------------------------------------------------------------------------
# Script body: load the Stable Diffusion components, optionally swap in an
# INC-quantized UNet ("best_model.pt"), generate images and save them.
# NOTE(review): this copy rebinds every result to `SCREAMING_SNAKE_CASE__`
# while later statements read `args`, `tokenizer`, `text_encoder`, `vae`,
# `unet`, `pipeline`, `grid`, `images` and `dirname` — those names are unbound
# as written (NameError). Comments describe the apparent intent; confirm
# against the original script before relying on this.
# ---------------------------------------------------------------------------
SCREAMING_SNAKE_CASE__ : Any = parse_args()
# Load models and create wrapper for stable diffusion
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
SCREAMING_SNAKE_CASE__ : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
SCREAMING_SNAKE_CASE__ : int = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: always report images as non-NSFW.
SCREAMING_SNAKE_CASE__ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    # Prefer the Intel Neural Compressor quantized UNet when available.
    SCREAMING_SNAKE_CASE__ : str = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    SCREAMING_SNAKE_CASE__ : Dict = unet.to(torch.device("""cuda""", args.cuda_id))
SCREAMING_SNAKE_CASE__ : Any = pipeline.to(unet.device)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid under "<caption with underscores>.png" and the individual
# images as 1.png, 2.png, ... in a matching directory.
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
SCREAMING_SNAKE_CASE__ : int = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a_ :Dict = logging.get_logger(__name__)
def a(module, tensor_name, device, value=None, fpaa_statistics=None):
    """Set ``module.<tensor_name>`` (a parameter or buffer, possibly addressed
    with a dotted path) to ``value`` on ``device``, re-wrapping bitsandbytes
    int8/4bit parameters so their quantization state survives the move.

    Args:
        module: root torch module, modified in place.
        tensor_name: possibly dotted name of the parameter/buffer.
        device: target device (or ``"meta"``).
        value: optional replacement tensor/array; defaults to moving the old value.
        fpaa_statistics: optional fp16 ``SCB`` statistics attached for int8 serialization.
    """
    # Walk dotted paths down to the submodule that owns the tensor.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        # `Params4bit` only exists in recent bitsandbytes releases.
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, ConvaD) and fpaa_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight, "SCB", fpaa_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def a(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """Recursively replace ``nn.Linear``/``Conv1D`` children of ``model`` with
    bitsandbytes quantized linear layers, skipping ``modules_to_not_convert``.

    Returns ``(model, has_been_replaced)`` where the flag records whether at
    least one layer was converted anywhere in the tree.

    NOTE(review): the recursive call below uses the name
    `_replace_with_bnb_linear`, matching the original transformers helper; in
    this scrambled module the function itself is bound as `a` — confirm.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, ConvaD)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, ConvaD):
                        # Conv1D stores its weight transposed relative to Linear.
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def a(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Convert the supported linear layers of ``model`` to bitsandbytes
    quantized layers; ``lm_head`` is kept in full precision by default.

    NOTE(review): `_replace_with_bnb_linear` and `logger` are not bound under
    those names in this scrambled module — confirm the intended helpers.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def a(*args, **kwargs):
    """Deprecated alias: forwards to ``replace_with_bnb_linear`` after
    emitting a FutureWarning."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def a(*args, **kwargs):
    """Deprecated alias: forwards to ``set_module_quantized_tensor_to_device``
    after emitting a FutureWarning."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def a(model):
    """Return the list of module names that should NOT be quantized: tied
    weights plus the output (head) module, with ``.weight``/``.bias`` suffixes
    stripped.
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 35 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_UpperCAmelCase : Dict = parse(importlib.metadata.version('''torch'''))
def UpperCamelCase(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's version against ``requirement_version``.

    Args:
        library_or_version: a distribution name (its installed version is
            looked up) or an already-parsed ``Version``.
        operation: a key of ``STR_OPERATION_TO_FUNC`` (e.g. ``">="``).
        requirement_version: version string to compare against.

    Raises:
        ValueError: when ``operation`` is not a known comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def UpperCamelCase(operation: str, version: str):
    """Compare the installed torch version (parsed at import time above into
    ``_UpperCAmelCase``) against ``version`` using ``operation``.

    NOTE(review): the comparison helper above is also bound to the name
    `UpperCamelCase` (which this definition shadows), so the
    `compare_versions` call below is unresolved as written — confirm the
    intended helper name against the original module.
    """
    return compare_versions(_UpperCAmelCase, operation, version)
| 72 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( lowerCamelCase_ ):
    """Diffusion pipeline that transcribes spoken audio with a Whisper speech
    model and then renders the transcription with Stable Diffusion.

    NOTE(review): in this scrambled copy every __init__/__call__ parameter is
    named `lowerCAmelCase_` (a duplicate-argument SyntaxError) and most locals
    are rebound to `_lowercase` while later lines read other names
    (`safety_checker`, `text_inputs`, `latents`, ...). The comments below
    describe the apparent intent — confirm against the original diffusers
    community pipeline before relying on this.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
        # Registers the speech model/processor plus the usual Stable Diffusion
        # components so they are moved between devices together.
        super().__init__()

        if safety_checker is None:
            logger.warning(
                F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            speech_model=lowerCAmelCase_ , speech_processor=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , )

    def __lowerCAmelCase ( self , lowerCAmelCase_ = "auto" ):
        # Enable sliced attention; "auto" halves the attention head dim per slice.
        if slice_size == "auto":
            _lowercase =self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCAmelCase_ )

    def __lowerCAmelCase ( self ):
        # Disable attention slicing (passing None restores full attention).
        # NOTE(review): both helpers share the name `__lowerCAmelCase`, so this
        # definition shadows the previous one — confirm intended method names.
        self.enable_attention_slicing(lowerCAmelCase_ )

    @torch.no_grad()
    def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=16000 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , **lowerCAmelCase_ , ):
        # --- 1) Speech-to-text: transcribe the raw audio with Whisper. ---
        _lowercase =self.speech_processor.feature_extractor(
            lowerCAmelCase_ , return_tensors="pt" , sampling_rate=lowerCAmelCase_ ).input_features.to(self.device )
        _lowercase =self.speech_model.generate(lowerCAmelCase_ , max_length=480000 )

        _lowercase =self.speech_processor.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , normalize=lowerCAmelCase_ )[
            0
        ]

        # --- 2) Validate the derived prompt / image shape / callback args. ---
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            _lowercase =1
        elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            _lowercase =len(lowerCAmelCase_ )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase_ )}''' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowerCAmelCase_ )}.''' )

        # get prompt text embeddings
        _lowercase =self.tokenizer(
            lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        _lowercase =text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            _lowercase =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            _lowercase =text_input_ids[:, : self.tokenizer.model_max_length]
        _lowercase =self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _lowercase , _lowercase , _lowercase =text_embeddings.shape
        _lowercase =text_embeddings.repeat(1 , lowerCAmelCase_ , 1 )
        _lowercase =text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase_ , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        _lowercase =guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            _lowercase =42
            if negative_prompt is None:
                _lowercase =[""] * batch_size
            elif type(lowerCAmelCase_ ) is not type(lowerCAmelCase_ ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase_ )} !='''
                    F''' {type(lowerCAmelCase_ )}.''' )
            elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
                _lowercase =[negative_prompt]
            elif batch_size != len(lowerCAmelCase_ ):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase_ )}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    " the batch size of `prompt`." )
            else:
                _lowercase =negative_prompt

            _lowercase =text_input_ids.shape[-1]
            _lowercase =self.tokenizer(
                lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="pt" , )
            _lowercase =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _lowercase =uncond_embeddings.shape[1]
            _lowercase =uncond_embeddings.repeat(1 , lowerCAmelCase_ , 1 )
            _lowercase =uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase_ , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _lowercase =torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        _lowercase =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        _lowercase =text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                _lowercase =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_ ).to(
                    self.device )
            else:
                _lowercase =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            _lowercase =latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(lowerCAmelCase_ )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        _lowercase =self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        _lowercase =latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _lowercase ="eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        _lowercase ={}
        if accepts_eta:
            _lowercase =eta

        # --- 3) Denoising loop. ---
        for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
            # expand the latents if we are doing classifier free guidance
            _lowercase =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            _lowercase =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )

            # predict the noise residual
            _lowercase =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample

            # perform guidance
            if do_classifier_free_guidance:
                _lowercase , _lowercase =noise_pred.chunk(2 )
                _lowercase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            _lowercase =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

        # --- 4) Decode latents to images (1/0.18215 is the SD VAE scaling). ---
        _lowercase =1 / 0.1_8_2_1_5 * latents
        _lowercase =self.vae.decode(lowerCAmelCase_ ).sample

        _lowercase =(image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        _lowercase =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            _lowercase =self.numpy_to_pil(lowerCAmelCase_ )

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_ )
| 594 | from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module logger and the map of canonical T5 checkpoints to their config files.
# NOTE(review): both values are bound to `lowerCAmelCase__` (the second
# assignment shadows the first); in transformers these are conventionally
# named `logger` and `T5_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm.
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class _a ( lowerCamelCase_ ):
    """Configuration for T5-style encoder-decoder models.

    Mirrors ``transformers.T5Config``: stores the model hyper-parameters and
    validates/normalizes the ``feed_forward_proj`` activation spec.
    """

    model_type = 't5'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # default = symmetry: decoder depth mirrors the encoder unless given.
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-<act>" enables the gated feed-forward variant (e.g. T5 v1.1).
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class _a ( lowerCamelCase_ ):
    """ONNX export configuration for T5 (seq2seq, with optional cached past
    key values)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached past, the decoder receives one new token per step and
            # the attention mask spans past + current positions.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting the operators T5 needs."""
        return 13
| 594 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
A__ : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
A__ : Optional[Any] = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
A__ : Optional[Any] = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Both arguments are numpy arrays (or array-likes supporting elementwise
    ``==`` and ``.mean()``) of the same shape.
    """
    # Elementwise comparison -> boolean array; its mean is the accuracy.
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both simple accuracy and F1 score for ``preds`` vs ``labels``.

    NOTE(review): relies on a module-level ``fa_score`` (presumably sklearn's
    ``f1_score`` under a mangled name) — confirm the import at the file top.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For every English sentence vector, rank all Indian-language sentence
    vectors by cosine distance and count a hit when the aligned sentence
    (same row index) appears in the top 10.

    Args:
        en_sentvecs: array-like of shape (n, d) of English sentence vectors.
        in_sentvecs: array-like of shape (n, d) of aligned target vectors.

    Returns:
        float: fraction of rows whose aligned counterpart is ranked top-10.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering before the cosine comparison
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, 'cosine')
    actual = np.array(range(n))
    # Smallest cosine distance first; keep the 10 nearest candidates per row.
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case__(datasets.Metric):
    """IndicGLUE evaluation metric.

    Dispatches on ``self.config_name``: precision@10 for 'cvit-mkb-clsr',
    accuracy + F1 for 'wiki-ner', plain accuracy for every other subset.
    """

    # Restored the datasets.Metric hook names (_info/_compute): both methods
    # had been renamed to the same identifier, so one shadowed the other.
    def _info(self):
        """Declare features/format for the configured subset (fails fast on a bad name)."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr compares sentence vectors; all other
                    # subsets compare integer labels.
                    'predictions': datasets.Value('int64')
                    if self.config_name != 'cvit-mkb-clsr'
                    else datasets.Sequence(datasets.Value('float32')),
                    'references': datasets.Value('int64')
                    if self.config_name != 'cvit-mkb-clsr'
                    else datasets.Sequence(datasets.Value('float32')),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy' if self.config_name != 'cvit-mkb-clsr' else None,
        )

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` for the configured subset."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]' )
| 286 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the MRPC example (16 presumably the per-device
# train cap, 32 the eval batch size — TODO confirm against the upstream
# accelerate example).
# NOTE(review): both statements bind the SAME name `a`, so the first value is
# immediately shadowed — looks like an automated-renaming artifact.
a : int = 16
a : int = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE/MRPC train and eval dataloaders for this example.

    Args:
        accelerator: ``Accelerator`` whose distributed / mixed-precision
            settings drive the padding strategy.
        batch_size: per-device batch size used for both dataloaders.

    Returns:
        ``(train_dataloader, eval_dataloader)``
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''')

    # Instantiate dataloaders (eval split is not shuffled).
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only: under CI, replace the real dataloader builder with a
# mocked version so no datasets are downloaded.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # NOTE(review): `Optional` is not imported in this chunk — this annotated
    # assignment would raise NameError when the branch is taken; also the
    # target name `a` looks like a renaming artifact (it shadows the batch-size
    # constant above). Verify against the upstream example.
    a : Optional[Any] = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on GLUE/MRPC with Accelerate + LocalSGD.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace (``cpu``, ``mixed_precision``,
            ``gradient_accumulation_steps``, ``local_sgd_steps``).
    """
    # For testing only: shrink the run when dataloaders are mocked.
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI arguments and launch the training function."""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''')
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''', type=int, default=1,
        help='''The number of minibatches to be ran before gradients are accumulated.''')
    parser.add_argument(
        '''--local_sgd_steps''', type=int, default=8,
        help='''Number of local SGD steps or None to disable local SGD''')
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 640 | 0 |
def cramers_rule_2x2(equation1, equation2):
    """Solve two simultaneous linear equations with Cramer's rule.

    Each equation is a 3-sequence ``(a, b, c)`` representing ``a*x + b*y = c``.

    Returns:
        ``(x, y)`` as floats (``(0.0, 0.0)`` for the trivial solution).

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients, if
            both a and b are zero in both equations, or if the system has
            infinitely many / no solutions.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: both unknowns are zero.
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
def gnome_sort(lst):
    """Sort ``lst`` in place with gnome sort and return it.

    Walks the list, swapping adjacent out-of-order elements and stepping back
    after each swap; O(n^2) worst case, O(n) on already-sorted input.
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Out of order: swap and step back to re-check the previous pair.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    # Read comma-separated integers from stdin, gnome-sort them, print the result.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
'''simple docstring'''
# Imports
import numpy as np
class lowerCamelCase_:
    """Calculate vegetation indices from per-band image arrays.

    Bands (numpy arrays of the same shape): red, green, blue, red-edge
    (stored as ``self.redEdge``) and near-infrared (``self.nir``).
    Method names restored to match the dispatch table in ``calculation``;
    they had all been mangled to a single shadowed identifier.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        # Store whichever bands were provided; missing bands stay unset.
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update stored band matrices; only non-None arguments are assigned."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Compute the index named ``index`` (optionally updating bands first).

        Returns the computed array, or False (after printing a message) when
        the index name is unknown.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            '''ARVI2''': self.arvaa,
            '''CCCI''': self.ccci,
            '''CVI''': self.cvi,
            '''GLI''': self.gli,
            '''NDVI''': self.ndvi,
            '''BNDVI''': self.bndvi,
            '''redEdgeNDVI''': self.red_edge_ndvi,
            '''GNDVI''': self.gndvi,
            '''GBNDVI''': self.gbndvi,
            '''GRNDVI''': self.grndvi,
            '''RBNDVI''': self.rbndvi,
            '''PNDVI''': self.pndvi,
            '''ATSAVI''': self.atsavi,
            '''BWDRVI''': self.bwdrvi,
            '''CIgreen''': self.ci_green,
            '''CIrededge''': self.ci_rededge,
            '''CI''': self.ci,
            '''CTVI''': self.ctvi,
            '''GDVI''': self.gdvi,
            '''EVI''': self.evi,
            '''GEMI''': self.gemi,
            '''GOSAVI''': self.gosavi,
            '''GSAVI''': self.gsavi,
            '''Hue''': self.hue,
            '''IVI''': self.ivi,
            '''IPVI''': self.ipvi,
            '''I''': self.i,
            '''RVI''': self.rvi,
            '''MRVI''': self.mrvi,
            '''MSAVI''': self.m_savi,
            '''NormG''': self.norm_g,
            '''NormNIR''': self.norm_nir,
            '''NormR''': self.norm_r,
            '''NGRDI''': self.ngrdi,
            '''RI''': self.ri,
            '''S''': self.s,
            '''IF''': self._if,
            '''DVI''': self.dvi,
            '''TVI''': self.tvi,
            '''NDRE''': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('''Index not in the list!''')
            return False

    def arvaa(self):
        """ARVI2: Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-band Normalized Difference Vegetation Index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted Transformed Soil-Adjusted VI."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index (green band)."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index (red-edge band)."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil-Adjusted Vegetation Index."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil-Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Hue angle from the RGB bands."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index (callers must supply ``a`` and ``b``)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity of the RGB bands."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified Ratio Vegetation Index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green band."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR band."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red band."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation of the RGB bands."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape Index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Difference/ratio Vegetation Index (nir/red as in the original)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 75 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from parquet."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    # Build a Features object from the dtype mapping (None => infer).
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('''path_type''', [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from parquet."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'''train''': parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({'''train''': parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = '''train'''
        path = {'''train''': parquet_path, '''test''': parquet_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / '''foo.parquet''')
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / '''foo.parquet''')
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / '''test_image_rgb.jpg''')
    data = {'''image''': [image_path]}
    features = Features({'''image''': Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / '''foo.parquet''')
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / '''foo.parquet'''))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / '''foo.parquet'''), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    '''feature, expected''', [
        (Features({'''foo''': Value('''int32''')}), None),
        (Features({'''image''': Image(), '''foo''': Value('''int32''')}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'''nested''': Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ], )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 678 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the Trajectory Transformer model: register the
# submodules/symbols and defer heavy imports until first attribute access.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

# Only register the modeling module when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 15 | import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = (EulerDiscreteScheduler,)
_snake_case = 10
def _SCREAMING_SNAKE_CASE ( self : Tuple , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**_UpperCamelCase)
return config
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_lowerCamelCase : str = torch.manual_seed(0)
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : int = sample.to(_UpperCamelCase)
for i, t in enumerate(scheduler.timesteps):
_lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : str = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : Dict = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : Any = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
    """Same denoising loop as the default-config test, but with
    ``prediction_type="v_prediction"`` and its own reference statistics.

    NOTE(review): locals are bound to ``_lowerCamelCase`` but read back under
    other names — identifiers look machine-mangled; confirm against upstream.
    """
    _lowerCamelCase : int = self.scheduler_classes[0]
    _lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""")
    _lowerCamelCase : int = scheduler_class(**_UpperCamelCase)
    scheduler.set_timesteps(self.num_inference_steps)
    _lowerCamelCase : Any = torch.manual_seed(0)
    _lowerCamelCase : int = self.dummy_model()
    _lowerCamelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
    _lowerCamelCase : Dict = sample.to(_UpperCamelCase)
    for i, t in enumerate(scheduler.timesteps):
        _lowerCamelCase : Optional[int] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : str = model(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : List[Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
        _lowerCamelCase : Tuple = output.prev_sample
    # v-prediction drives the deterministic sample towards ~0.
    _lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase))
    _lowerCamelCase : Optional[int] = torch.mean(torch.abs(_UpperCamelCase))
    assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
    assert abs(result_mean.item() - 2.2_6_7_6E-0_6) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
    """Denoising loop with ``set_timesteps(..., device=...)``; the expected
    statistics match the default-config run, so device placement must not
    change the results.

    NOTE(review): locals are bound to ``_lowerCamelCase`` but read back under
    other names — identifiers look machine-mangled; confirm against upstream.
    """
    _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
    _lowerCamelCase : int = self.get_scheduler_config()
    _lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
    scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
    _lowerCamelCase : Optional[Any] = torch.manual_seed(0)
    _lowerCamelCase : Tuple = self.dummy_model()
    # init_noise_sigma is brought to CPU before scaling the deterministic sample.
    _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    _lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
    for t in scheduler.timesteps:
        _lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
        _lowerCamelCase : List[Any] = output.prev_sample
    _lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
    _lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
    assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
    assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
    """Denoising loop with the Karras sigma schedule enabled
    (``use_karras_sigmas=...``), checked against its own reference statistics.

    NOTE(review): locals are bound to ``_lowerCamelCase`` but read back under
    other names, and the ``use_karras_sigmas`` argument is the mangled
    placeholder — confirm the intended boolean against upstream.
    """
    _lowerCamelCase : List[str] = self.scheduler_classes[0]
    _lowerCamelCase : Optional[int] = self.get_scheduler_config()
    _lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
    scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
    _lowerCamelCase : int = torch.manual_seed(0)
    _lowerCamelCase : Tuple = self.dummy_model()
    _lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    _lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
    for t in scheduler.timesteps:
        _lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
        _lowerCamelCase : int = output.prev_sample
    _lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
    _lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
    assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
    assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
| 15 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Container for the hyper-parameters used to build a Levit image
    processor in the tests below.

    Fixes over the original block:
    - every ``__init__`` parameter was declared with the same mangled name
      (a ``SyntaxError``); distinct names are restored from the attributes
      they populate,
    - the constructor bound throwaway locals instead of instance attributes,
      so ``prepare_image_processor_dict`` could never work; attributes are
      now set,
    - the class and method names are restored to the identifiers the sibling
      test class actually references (``LevitImageProcessingTester`` /
      ``prepare_image_processor_dict``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # mutable defaults kept for interface compatibility
        image_std=[0.5, 0.5, 0.5],
    ):
        # Fall back to the processor's library defaults when no explicit
        # sizes are provided.
        self.size = size if size is not None else {'shortest_edge': 18}
        self.crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
    """Tests for the Levit image processor: attribute presence, ``from_dict``
    round-trips, and ``__call__`` on PIL / numpy / torch batches.

    NOTE(review): every method below shares the mangled name
    ``UpperCAmelCase__`` (later definitions overwrite earlier ones at class
    creation), the mixin base ``lowerCAmelCase`` is not defined in this file,
    and several helper arguments are the undefined placeholder
    ``UpperCAmelCase`` — confirm against the upstream test module.
    """

    # Processor class under test; None when vision dependencies are missing.
    a__: Tuple = LevitImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self ):
        # setUp: build the shared hyper-parameter holder.
        lowerCamelCase_ = LevitImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self ):
        # kwargs used to instantiate the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self ):
        # The processor must expose every configuration attribute.
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''image_std''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''size''' ) )

    def UpperCAmelCase__ ( self ):
        # from_dict honors defaults and keyword overrides for size/crop_size.
        lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def UpperCAmelCase__ ( self ):
        pass

    def UpperCAmelCase__ ( self ):
        # __call__ on PIL images: single image and batch must be cropped to
        # (crop_height, crop_width).
        # Initialize image_processing
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , Image.Image )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCAmelCase__ ( self ):
        # Same as above but the inputs are numpy arrays.
        # Initialize image_processing
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , np.ndarray )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCAmelCase__ ( self ):
        # Same as above but the inputs are torch tensors.
        # Initialize image_processing
        lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 29 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _UpperCAmelCase ( __A ):
    """Acquiring an already-held lock with a short timeout must raise ``Timeout``.

    *__A* is the pytest ``tmpdir`` fixture (a path-like object).

    Fixes over the original: the body read the never-bound names ``tmpdir`` and
    ``locka`` and raised the function argument instead of ``Timeout``; the
    wrong ``: int`` annotation on the path argument is dropped.
    """
    # Two independent handles on the same lock file.
    locka = FileLock(str(__A / '''foo.lock''' ) )
    lockb = FileLock(str(__A / '''foo.lock''' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        # The failed acquire must have waited at least the full timeout.
        assert time.time() - _start > timeout
def _UpperCAmelCase ( __A ):
    """Lock filenames longer than the OS limit must be hashed/truncated so the
    on-disk basename stays within 255 characters while keeping the ``.lock``
    suffix.

    *__A* is the pytest ``tmpdir`` fixture (a path-like object).

    Fixes over the original: the body read the never-bound names ``filename``
    and ``locka`` and passed the function argument to ``pytest.raises`` instead
    of ``Timeout``; the wrong ``: Tuple`` annotation is dropped.
    """
    filename = '''a''' * 10_00 + '''.lock'''
    locka = FileLock(str(__A / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    lockb = FileLock(__A / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 466 | 0 |
def lowerCAmelCase_ ( _UpperCamelCase ) -> bool:
    """Return True if the integer is "bouncy": its digits are neither in
    non-decreasing nor in non-increasing order (Project Euler 112).

    Fixes over the original: the body referenced the undefined name
    ``lowercase__`` instead of the parameter (and used it as the ``isinstance``
    type), and the return annotation said ``str`` for a boolean result.

    Raises:
        ValueError: if the argument is not an ``int``.
    """
    if not isinstance(_UpperCamelCase , int ):
        raise ValueError('''check_bouncy() accepts only integer arguments''' )
    digits = str(_UpperCamelCase )
    ascending = ''''''.join(sorted(digits ) )
    # Bouncy iff the digits are neither sorted ascending nor descending.
    return ascending != digits and ascending[::-1] != digits
def lowerCAmelCase_ ( _UpperCamelCase = 99 ) -> int:
    """Project Euler 112: return the least number at which the proportion of
    bouncy numbers reaches the given percentage (default 99).

    Fixes over the original: the body referenced the undefined names
    ``percent``, ``check_bouncy``, ``lowercase__``, ``bouncy_num`` and ``num``;
    the bouncy test is inlined as a private helper so the function is
    self-contained.

    Raises:
        ValueError: if the percentage is not strictly between 0 and 100.
    """
    if not 0 < _UpperCamelCase < 1_00:
        raise ValueError('''solution() only accepts values from 0 to 100''' )

    def _is_bouncy(candidate: int) -> bool:
        # Bouncy: digits neither fully ascending nor fully descending.
        digits = str(candidate )
        ascending = ''''''.join(sorted(digits ) )
        return ascending != digits and ascending[::-1] != digits

    bouncy_count = 0
    num = 1
    while True:
        if _is_bouncy(num ):
            bouncy_count += 1
        if (bouncy_count / num) * 1_00 >= _UpperCamelCase:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod

    # Run any doctests in this module, then print the answer for 99 percent.
    # NOTE(review): no function named `solution` is defined above under that
    # name — confirm the intended entry point.
    testmod()
    print(F'''{solution(99)}''')
| 702 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
#
# Fix: the original bound every constant to the same mangled name
# (`lowerCamelCase`), while the functions below read `TRANSFORMERS_PATH`,
# `transformers_module`, `_re_tf_models`, `_re_flax_models`, `_re_pt_models`
# and `PIPELINE_TAGS_AND_AUTO_MODELS`; the canonical names are restored.
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
    ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
    ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
    ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
    ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
    ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
    ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
    ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
    ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
    (
        'zero-shot-object-detection',
        'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
        'AutoModelForZeroShotObjectDetection',
    ),
    ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
    ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
    ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
    ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
    (
        'table-question-answering',
        'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForTableQuestionAnswering',
    ),
    ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
    ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
    (
        'next-sentence-prediction',
        'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
        'AutoModelForNextSentencePrediction',
    ),
    (
        'audio-frame-classification',
        'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForAudioFrameClassification',
    ),
    ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
    (
        'document-question-answering',
        'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForDocumentQuestionAnswering',
    ),
    (
        'visual-question-answering',
        'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForVisualQuestionAnswering',
    ),
    # NOTE(review): 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES' contains a
    # doubled 'FOR' — confirm against the transformers auto module.
    ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
    (
        'zero-shot-image-classification',
        'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForZeroShotImageClassification',
    ),
    ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
    ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
    ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __snake_case ( _UpperCamelCase ) -> list:
    """Split a camel-cased name into its component words,
    e.g. ``"RobertaModel"`` -> ``["Roberta", "Model"]``.

    Fixes over the original: the comprehension read the never-bound name
    ``matches``, and the ``-> Dict`` annotation referenced a name that is not
    imported (NameError at definition time) and was wrong anyway — the result
    is a list of strings.
    """
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _UpperCamelCase )
    return [m.group(0 ) for m in matches]
def __snake_case ( ) -> "pd.DataFrame":
    """Build a table of every model type with its PyTorch/TF/Flax support
    flags and the preprocessor class associated with it.

    NOTE(review): assignments bind ``_a`` while later lines read
    ``config_maping_names``/``tf_models``/``lookup_dict``/``all_models``/... —
    local names look machine-mangled; confirm against the upstream
    ``utils/update_metadata.py``. (Return annotation fixed: ``Union`` was not
    imported and the function returns a DataFrame.)
    """
    _a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    # Map "XxxYyy" model prefixes (config class minus "Config") to model types.
    _a = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    _a = collections.defaultdict(_UpperCamelCase )
    _a = collections.defaultdict(_UpperCamelCase )
    _a = collections.defaultdict(_UpperCamelCase )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(_UpperCamelCase ):
        _a = None
        if _re_tf_models.match(_UpperCamelCase ) is not None:
            _a = tf_models
            _a = _re_tf_models.match(_UpperCamelCase ).groups()[0]
        elif _re_flax_models.match(_UpperCamelCase ) is not None:
            _a = flax_models
            _a = _re_flax_models.match(_UpperCamelCase ).groups()[0]
        elif _re_pt_models.match(_UpperCamelCase ) is not None:
            _a = pt_models
            _a = _re_pt_models.match(_UpperCamelCase ).groups()[0]
        if lookup_dict is not None:
            while len(_UpperCamelCase ) > 0:
                if attr_name in model_prefix_to_model_type:
                    _a = True
                    break
                # Try again after removing the last word in the name
                _a = ''''''.join(camel_case_split(_UpperCamelCase )[:-1] )
    # Union of model types seen in any framework, sorted for stable output.
    _a = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    _a = list(_UpperCamelCase )
    all_models.sort()
    _a = {'''model_type''': all_models}
    _a = [pt_models[t] for t in all_models]
    _a = [tf_models[t] for t in all_models]
    _a = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    _a = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            _a = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            _a = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            _a = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            _a = '''AutoTokenizer'''
    _a = [processors[t] for t in all_models]
    return pd.DataFrame(_UpperCamelCase )
def __snake_case ( _UpperCamelCase ) -> dict:
    """Update a ``{model class: (pipeline tag, auto class)}`` table from the
    auto-module mappings of all three frameworks.

    NOTE(review): assignments bind ``_a`` while later lines read
    ``model_names``/``table`` — local names look machine-mangled; confirm
    against upstream. (Return annotation fixed: the function returns the
    updated table, not an int.)
    """
    # The three framework-specific auto modules, iterated in lockstep with
    # the TF_/FLAX_-prefixed mapping and auto-class name variants.
    _a = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        _a = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        _a = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
            # The type of pipeline may not exist in this framework
            if not hasattr(_UpperCamelCase , _UpperCamelCase ):
                continue
            # First extract all model_names
            _a = []
            for name in getattr(_UpperCamelCase , _UpperCamelCase ).values():
                if isinstance(_UpperCamelCase , _UpperCamelCase ):
                    model_names.append(_UpperCamelCase )
                else:
                    model_names.extend(list(_UpperCamelCase ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> None:
    """Regenerate ``frameworks.json`` and ``pipeline_tags.json`` and push them
    to the ``huggingface/transformers-metadata`` dataset repo.

    The two parameters appear to be the hub token and the commit sha (they are
    forwarded as ``token=`` and read as ``commit_sha`` below).

    NOTE(review): assignments bind ``_a`` while later lines read
    ``table``/``tags_dataset``/``frameworks_dataset``/``commit_sha``/``msg`` —
    local names look machine-mangled; confirm against upstream. (Return
    annotation fixed: ``Tuple`` was not imported and nothing is returned.)
    """
    _a = get_frameworks_table()
    _a = Dataset.from_pandas(_UpperCamelCase )
    # Pull the current pipeline-tag table from the hub and turn it into a dict.
    _a = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=_UpperCamelCase )
    _a = Dataset.from_json(_UpperCamelCase )
    _a = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(_UpperCamelCase ) )
    }
    _a = update_pipeline_and_auto_class_table(_UpperCamelCase )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    _a = sorted(table.keys() )
    _a = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        } )
    _a = Dataset.from_pandas(_UpperCamelCase )
    # Serialize both datasets to a temp dir and upload the folder in one commit.
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(_UpperCamelCase , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(_UpperCamelCase , '''pipeline_tags.json''' ) )
        if commit_sha is not None:
            _a = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            _a = '''Update'''
        upload_folder(
            repo_id='''huggingface/transformers-metadata''' , folder_path=_UpperCamelCase , repo_type='''dataset''' , token=_UpperCamelCase , commit_message=_UpperCamelCase , )
def __snake_case ( ) -> None:
    """Check that every pipeline task known to transformers has an entry in
    the ``PIPELINE_TAGS_AND_AUTO_MODELS`` constant; raise otherwise.

    NOTE(review): assignments bind ``_a`` while later lines read
    ``pipeline_tasks``/``in_table``/``model``/``missing``/``msg`` — local names
    look machine-mangled; confirm against upstream. (Return annotation fixed:
    ``str`` was wrong, the function only raises or returns None.)
    """
    _a = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    _a = transformers_module.pipelines.SUPPORTED_TASKS
    _a = []
    for key in pipeline_tasks:
        # Collect the PT model class of every task missing from the table.
        if key not in in_table:
            _a = pipeline_tasks[key]['''pt''']
            if isinstance(_UpperCamelCase , (list, tuple) ):
                _a = model[0]
            _a = model.__name__
            if model not in in_table.values():
                missing.append(_UpperCamelCase )
    if len(_UpperCamelCase ) > 0:
        _a = ''', '''.join(_UpperCamelCase )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            f"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
    # CLI entry point: either just verify the pipeline-tag table, or refresh
    # the metadata on the hub.
    # Fix: the original bound the parser and the parsed namespace to the
    # mangled name `lowerCamelCase` while reading `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()

    # NOTE(review): the callees below are defined above under mangled names
    # (`__snake_case`) — confirm `check_pipeline_tags` / `update_metadata`
    # resolve at runtime.
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 346 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
__UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__UpperCAmelCase = dict(zip(__A , range(len(__A ) ) ) )
__UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__UpperCAmelCase = {'unk_token': '<unk>'}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
__UpperCAmelCase = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__UpperCAmelCase = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__A , __A )
def __lowerCamelCase ( self , **__A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **__A )
def __lowerCamelCase ( self , **__A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **__A )
def __lowerCamelCase ( self , **__A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def __lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
__UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__UpperCAmelCase = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCAmelCase = self.get_image_processor(do_normalize=__A )
__UpperCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = image_processor(__A , return_tensors='np' )
__UpperCAmelCase = processor(images=__A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
__UpperCAmelCase = 'lower newer'
__UpperCAmelCase = processor(text=__A , return_tensors='np' )
__UpperCAmelCase = tokenizer(__A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
__UpperCAmelCase = 'lower newer'
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'google/owlvit-base-patch32'
__UpperCAmelCase = OwlViTProcessor.from_pretrained(__A )
__UpperCAmelCase = ['cat', 'nasa badge']
__UpperCAmelCase = processor(text=__A )
__UpperCAmelCase = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'google/owlvit-base-patch32'
__UpperCAmelCase = OwlViTProcessor.from_pretrained(__A )
__UpperCAmelCase = [['cat', 'nasa badge'], ['person']]
__UpperCAmelCase = processor(text=__A )
__UpperCAmelCase = 16
__UpperCAmelCase = len(__A )
__UpperCAmelCase = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'google/owlvit-base-patch32'
__UpperCAmelCase = OwlViTProcessor.from_pretrained(__A )
__UpperCAmelCase = ['cat', 'nasa badge']
__UpperCAmelCase = processor(text=__A )
__UpperCAmelCase = 16
__UpperCAmelCase = inputs['input_ids']
__UpperCAmelCase = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
__UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase = processor.batch_decode(__A )
__UpperCAmelCase = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 126 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
# Help text for the `accelerate tpu-config` sub-command.
# Fix: the original assigned this to the mangled name `_A` while the parser
# functions below read `_description`; the `Union[str, Any]` annotation also
# referenced names that are not imported here.
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def _lowerCAmelCase(subparsers=None) -> argparse.ArgumentParser:
    """Build the argument parser for the ``accelerate tpu-config`` command.

    When *subparsers* is given (the main accelerate CLI), the parser is added
    as a sub-command; otherwise a standalone parser is created.

    Fixes over the original: the parameter shadowed the function name while the
    body read ``subparsers``; the parser and argument groups were bound to
    mangled names while being read as ``parser``/``config_args``/``pod_args``;
    ``type``/``default`` values that were the mangled placeholder are restored
    to ``str``/``None``; the ``Optional[Any]`` annotation referenced names not
    imported here.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        # NOTE(review): `_lowerCAmelCase` resolves at call time to the last
        # definition of that name in this module (the launcher below);
        # confirm the launcher is the intended default handler.
        parser.set_defaults(func=_lowerCAmelCase )
    return parser
def tpu_command_launcher(args) -> None:
    """Run the configured startup command on a TPU pod via ``gcloud ... tpu-vm ssh``.

    Missing CLI options are filled in from the accelerate config file, the
    accelerate install spec is normalized, and the final command is either
    printed (``--debug``) or executed.

    NOTE(review): renamed from the mangled ``_lowerCAmelCase`` to match the
    call site in ``main`` below; the body referenced ``args`` although the
    parameter carried another name, which was a guaranteed NameError.
    """
    defaults = None
    # Get the default from the config file if it exists.
    # NOTE(review): `default_config_file`, `load_config_from_file`, `parse` and
    # `Version` must come from module-level imports outside this chunk -- confirm.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone

    # Normalize the accelerate version into a pip install spec.
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'

    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.')

    if args.command_file:
        with open(args.command_file, 'r') as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '; '.join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]

    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print('Successfully setup pod.')
def _lowerCAmelCase ( )-> Any:
__UpperCAmelCase = tpu_command_parser()
__UpperCAmelCase = parser.parse_args()
tpu_command_launcher(_lowerCAmelCase )
| 126 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase__ =False
class lowerCamelCase__ ( unittest.TestCase ):
    # Placeholder for the fast (CPU) test suite.
    # NOTE(review): the nightly class below carries the same mangled name and
    # shadows this one at import time -- they presumably had distinct names.
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for the VersatileDiffusion mega pipeline.

    NOTE(review): renamed from the mangled ``lowerCamelCase__`` which collided
    with the placeholder class above; method names restored so unittest can
    discover them (all three were previously named identically and collided).
    """

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        # Saving and reloading the pipeline must not change its forward pass.
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt="first prompt",
                image=init_image,
                text_to_image_strength=0.75,
                generator=generator,
                guidance_scale=7.5,
                num_inference_steps=2,
                output_type="numpy",
            ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 700 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Count per-token-id occurrences in a binarized MLM dataset and pickle the
    # resulting vocab-sized count vector (used to smooth masking probabilities).
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by token id; ids never seen stay at 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 442 | 0 |
def __UpperCAmelCase(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` for `iterations` rounds; return the space-separated output.

    Raises:
        ValueError: if `iterations` is not an int, `number` is not a positive int,
            or `iterations` < 1.

    (The original signature repeated one parameter name, a SyntaxError; the body
    already read `number` and `iterations`.)
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be
            and integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        # Neither multiple of 3 nor of 5: emit the number itself.
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += ' '
    return out
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_:
    """Binary-tree node: a float payload plus optional left/right children.

    Field names restored (the mangled version repeated one name three times,
    collapsing the dataclass to a single field) to match the `.data`/`.left`/
    `.right` accesses in the validator below.
    """

    data: float
    left: UpperCAmelCase_ | None = None
    right: UpperCAmelCase_ | None = None
def __UpperCAmelCase ( __a : TreeNode | None ) -> bool:
"""simple docstring"""
def is_valid_tree(__a : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__a ,__a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
__a : TreeNode | None ,__a : float ,__a : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,__a ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,__a )
)
return is_binary_search_tree_recursive_check(__a ,-float('''inf''' ) ,float('''inf''' ) )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 14 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece; the fast tokenizer
    # class references this name, so it must still be bound.
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = '▁'
class lowerCAmelCase__(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by the `tokenizers` library (Unigram model).

    The mangled version repeated one parameter name in every signature (a
    SyntaxError), collapsed the five class attributes onto one identifier, and
    dropped the ``self.`` on the attribute assignments; all restored here to the
    conventional PreTrainedTokenizerFast layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model into `save_directory`; return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, ('''-'''.join([filename_prefix]) + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] if False else (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 570 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
a__ : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__(LayoutLMvaImageProcessor):
    """Deprecated alias for the LayoutLMv2 image processor, kept for backwards compatibility.

    The mangled version used an undefined base class, repeated ``*a__``/``**a__``
    (a SyntaxError) and passed an undefined name as the warning category.
    """

    def __init__(self, *args, **kwargs):
        # Warn once on construction, then defer entirely to the image processor.
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 570 | 1 |
import heapq
import sys
import numpy as np
__lowerCamelCase = tuple[int, int]
class _snake_case:
    """Min-priority queue with O(1) membership tracking, used as an A* open list.

    The mangled version never bound ``self.elements``/``self.set``, repeated a
    parameter name (SyntaxError) and gave every method the same name; restored
    to the heapq-backed implementation the call sites expect.
    """

    def __init__(self) -> None:
        self.elements = []  # heap of (priority, item) pairs
        self.set = set()    # items currently queued

    def minkey(self):
        """Smallest queued priority, or inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self) -> bool:
        return len(self.elements) == 0

    def put(self, item, priority) -> None:
        """Insert `item`, or re-prioritize it when already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop until the stale entry surfaces, then push everything back
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item) -> None:
        """Drop `item` from the queue if present."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Peek at the item with the smallest priority (no removal)."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the smallest (priority, item) pair."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


# The rest of this module instantiates the queue via this name.
PriorityQueue = _snake_case
def consistent_heuristic(P, goal):
    """Euclidean distance between grid positions `P` and `goal`.

    Name restored to match the `heuristics` dict below; the mangled signature
    repeated one parameter name (SyntaxError).
    """
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_1(P, goal) -> int:
    """Inadmissible heuristic: consistent heuristic shrunk by the global time counter `t`."""
    # integer division by time variable
    return consistent_heuristic(P, goal) // t
def heuristic_2(p, goal) -> int:
    """Manhattan distance between grid positions `p` and `goal`."""
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start, i, goal, g_function):
    """Open-list priority of `start` under heuristic `i`: g-cost + W1-weighted h."""
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the grid with the found path and the path itself, then exit.

    NOTE(review): reconstructed from a mangled source that collapsed all grid
    writes onto one name; the grid-indexing scheme (y flipped) follows the
    upstream multi-heuristic A* layout -- confirm against the original.
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'

    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # mark each path cell; grid rows run top-to-bottom, so flip the y axis
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p) -> bool:
    """Return True when position `p` lies inside the n x n board."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand node `s` taken from queue `j`: relax its 4-neighbours and requeue them.

    The mangled signature repeated one parameter name eight times (SyntaxError);
    parameters restored from the call sites in `multi_a_star`.
    """
    # `s` has been chosen for expansion: remove it from every open list.
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)

    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # first time this cell is seen: initialize its bookkeeping
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )
def make_common_ground():
    """Build the list of obstacle cells for the alternative test grid."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# Heuristic table: index 0 is the consistent anchor, 1-2 are inadmissible.
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

# Vertical wall of obstacles along column y = 1.
blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

# Active obstacle set used by the search.
blocks = blocks_blk

# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

# time counter consumed by heuristic_1; incremented inside multi_a_star
t = 1
def multi_a_star(start, goal, n_heuristic):
    """Shared multi-heuristic A*: one anchor queue plus inadmissible queues.

    Prints the solution grid via `do_something` (which exits) on success,
    otherwise prints the explored grid and 'No path found to goal'.
    """
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): the mangled source unpacked two values from
                    # top_show() here, which returns a single position -- confirm.
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print('No path found to goal')
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print('#', end=' ')
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print('*', end=' ')
                else:
                    print('-', end=' ')
            else:
                print('*', end=' ')
            if (j, i) == (n - 1, n - 1):
                print('<-- End position', end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
if __name__ == "__main__":
    # Entry point: run the multi-heuristic A* search on the module-level grid.
    # (Stray "| 317 |" table residue removed from the original line; it was a
    # syntax error.)
    multi_a_star(start, goal, n_heuristic)
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True,
    reason='Skipping test because should only be run when releasing minor transformers version',
)
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ]
)
class _snake_case(unittest.TestCase):
    """Single-node SageMaker training smoke test (one parameterized run per framework).

    Method names restored: the mangled version named all four methods
    identically, so only the last survived and unittest discovered nothing.
    """

    def setUp(self):
        # The PyTorch estimator reuses the example GLUE training script.
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count=1):
        """Build a single-node HuggingFace SageMaker estimator."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f'{self.env.base_job_name}-single',
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test artifacts."""
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
'''simple docstring'''
from __future__ import annotations
# Base of the radix sort (the function body already referenced RADIX).
RADIX = 10


def __lowerCAmelCase(list_of_ints):
    """LSD radix sort: sort `list_of_ints` (non-negative ints) in place and return it.

    Robustness: an empty input now returns immediately instead of crashing on
    `max([])`.
    """
    if not list_of_ints:
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 40 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are yielded sequentially.

    Name restored to match the call in `main`; the mangled ``__init__`` repeated
    one parameter name (SyntaxError) and never bound the instance attributes.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether every generated function in the batch is complete.

    Name restored to match the reference in `main`; the mangled ``__init__``
    repeated one parameter name (SyntaxError).
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length  # prompt length; only tokens after it are inspected
        self.eof_strings = eof_strings    # strings that mark the end of a function
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when every sequence contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Strip the last (incomplete) block after the final EOF_STRINGS delimiter.

    Name restored to match the call in `complete_code`.
    """
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` candidate completions per HumanEval task.

    Returns a list (indexed by task id) of lists of post-processed code strings.
    Name and local bindings restored from the call site in `main`.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Only tokens generated after the prompt count toward the stop check.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Evaluate a causal LM on OpenAI HumanEval with the `code_eval` metric.

    Name restored to match the ``__main__`` guard; every local below was
    collapsed to one throwaway name in the mangled source.
    """
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # CLI entry point. NOTE(review): as written this file defines no `main`
    # (the defs carry mangled names) -- confirm the intended target.
    main()
| 40 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__(DiffusionPipeline):
    """Unconditional image generation using the score-based SDE-VE sampler.

    The mangled version inherited from an undefined name and replaced the two
    component annotations with ``__a = 42`` placeholders; both restored.

    Attributes:
        unet: the denoising U-Net.
        scheduler: `ScoreSdeVeScheduler` paired with `unet`.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # Start from noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin corrector passes)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the output image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 139 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds a small random AlbertConfig plus matching inputs for the Flax tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Create random ids/masks and a matching small AlbertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common model-test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class lowerCamelCase__ ( FlaxModelTesterMixin , unittest.TestCase ):
    """Common Flax model tests applied to the ALBERT model family."""

    # NOTE(review): FlaxAlbertForQuestionAnswering appears twice in the original
    # tuple; kept to preserve behavior.
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # The common-test mixin reads `self.model_tester`.
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that every model class loads albert-base-v2 and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test comparing FlaxAlbertModel output against reference values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        # Expected hidden-state slice for albert-base-v2 on this input.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 139 | 1 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A__ : List[Any] = get_logger(__name__)
A__ : List[Any] = R'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class snake_case__ :
    """Abstract base class for logits processors applied during Flax generation."""

    @add_start_docstrings(A__)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Process next-token `scores`; concrete subclasses must override."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class snake_case__ :
    """Abstract base class for logits warpers applied during multinomial sampling."""

    @add_start_docstrings(A__)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Warp next-token `scores`; concrete subclasses must override."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Container that applies each contained logits processor in sequence.

    NOTE(review): the base-class name `SCREAMING_SNAKE_CASE_` is unresolved in
    this file; upstream this container derives from `list` (it iterates `self`).
    """

    @add_start_docstrings(A__)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Processors taking extra arguments must receive them all via kwargs.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        f'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Warper that rescales the logits distribution by a positive `temperature`."""

    def __init__(self, temperature: float):
        # Reject non-float or non-positive temperatures up front.
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Warper keeping the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Sort all logits descending, then keep the nucleus by cumulative probability.
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Undo the sort so masked scores land back at their original vocab positions.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Warper keeping only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        # Start from a fully-filtered flat score buffer and scatter the top-k back in.
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Per-row offset so flat indices address the right batch row.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor forcing `bos_token_id` to be the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))

        # Penalty only applies at the very first generation step (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor forcing `eos_token_id` when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))

        # Penalty only applies on the step that generates the final token.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor forbidding EOS until at least `min_length` tokens have been generated."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)

        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor suppressing a fixed token set on the first step after `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        # Suppression only applies exactly at the configured begin index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)

        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor masking a fixed set of token ids at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float('inf'))

        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor forcing specific tokens at specific generation indices (XLA-friendly)."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            # Build scores that are -inf everywhere except 0 at the forced token.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf')
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        # Traced conditionals: only force when within the map and the entry is non-negative.
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Processor implementing Whisper's timestamp-token sampling rules."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # Timestamp token ids start directly after the <|notimestamps|> token.
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, 'max_initial_timestamp_index'):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))

        def handle_pairs(input_ids_k, scores_k):
            # Timestamps must come in pairs: after one timestamp, either another
            # timestamp or text follows, never a third consecutive timestamp.
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float('inf')),
                    scores_k.at[: self.eos_token_id].set(-float('inf')),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        # Cap how far into the clip the very first timestamp may point.
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float('inf')), scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float('inf')),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 124 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """Builds a small random LayoutLMConfig plus matching inputs for the TF tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal: coordinate pairs must be ordered (x0<=x1, y0<=y1).
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        # Exercise all the supported input combinations.
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common model-test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class snake_case__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common TF model and pipeline tests applied to the LayoutLM model family."""

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFLayoutLMModel,
            'fill-mask': TFLayoutLMForMaskedLM,
            'text-classification': TFLayoutLMForSequenceClassification,
            'token-classification': TFLayoutLMForTokenClassification,
            'zero-shot': TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): names for the three flag attributes restored by convention of
    # the common TF test mixin (head masking off, ONNX export from opset 10).
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        # The common-test mixin reads `self.model_tester` and `self.config_tester`.
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Onnx compliancy broke with TF 2.10')
    def test_onnx_compliancy(self):
        pass
def a_ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__snake_case : Optional[Any] = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__snake_case : List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__snake_case : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__snake_case : Union[str, Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__snake_case : int = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case__ ( unittest.TestCase ):
    """Slow integration tests that run the pretrained TF LayoutLM checkpoints
    end to end and compare outputs against recorded reference values.

    Fixes over the previous version: the batch inputs are unpacked into
    distinct names (they were all bound to one placeholder and the bodies
    then read an undefined ``__a``), and the four test methods get distinct
    names (they all shared one name, so only the last was ever registered).
    """

    @slow
    def test_forward_pass_no_head(self) -> None:
        """Base model: check slices of the sequence output and pooled output."""
        model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_pooled = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_pooled, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self) -> None:
        """Sequence-classification head: per-example loss and logits shapes."""
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar per batch element
        loss = outputs.loss
        self.assertEqual(loss.shape, (2,))
        # test the shape of the logits
        logits = outputs.logits
        self.assertEqual(logits.shape, (2, 2))

    @slow
    def test_forward_pass_token_classification(self) -> None:
        """Token-classification head: logits shape (batch, seq_len, num_labels)."""
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self) -> None:
        """Question-answering head: start/end logits shapes (batch, seq_len)."""
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 124 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__( ProcessorMixin ):
    r"""Bundle a CLAP feature extractor and a Roberta tokenizer into a single
    processor so text and/or audio can be prepared with one call.

    Fixes over the previous version: the base class is the imported
    ``ProcessorMixin`` (the old base name was undefined), the two class
    attributes get the names ``ProcessorMixin`` actually reads (both were
    named ``a``), the audio features are merged into the returned encoding
    (they were assigned to a dead local), and the decode helpers get
    distinct names (all three shared one name).
    """

    # ProcessorMixin resolves these into `self.feature_extractor` / `self.tokenizer`.
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or extract features from *audios*.

        Returns the tokenizer encoding (with ``input_features`` merged in
        when audio is also given) or a `BatchEncoding` of audio features only.
        Raises ValueError when neither input is provided.
        """
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            # attach the extracted audio features to the text encoding
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and feature-extractor input names,
        # de-duplicated while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 97 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
    # Running this module directly executes any doctests in the functions above.
    import doctest
    doctest.testmod()
| 43 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
    """Return every permutation of *lowerCAmelCase_* by head rotation.

    Repeatedly pops the head element, recursively permutes the remainder,
    and appends the head to each sub-permutation.  The input list is
    mutated during the recursion but restored before returning.
    """
    if len(lowerCAmelCase_ ) == 1:
        return [lowerCAmelCase_.copy()]
    collected = []
    for _ in range(len(lowerCAmelCase_ ) ):
        head = lowerCAmelCase_.pop(0 )
        # each sub-permutation of the remainder gets the head appended at its end
        for sub_perm in UpperCAmelCase__(lowerCAmelCase_ ):
            sub_perm.append(head )
            collected.append(sub_perm )
        lowerCAmelCase_.append(head )  # restore the list for the next rotation
    return collected
def UpperCAmelCase__ (lowerCAmelCase_ ):
    """Return all permutations of *lowerCAmelCase_* via in-place swap backtracking.

    Fixes over the previous version: the inner function's parameter is the
    name its body actually reads (``start``), the swap targets the list
    elements (they were assigned to throwaway locals, so nothing moved),
    and the ``output`` accumulator exists before the recursion runs.
    Every swap is undone on the way back up, so the input list is left
    unchanged.
    """

    def backtrack(start ):
        if start == len(lowerCAmelCase_ ) - 1:
            output.append(lowerCAmelCase_[:] )
        else:
            for i in range(start , len(lowerCAmelCase_ ) ):
                # place element i at position `start` ...
                lowerCAmelCase_[i], lowerCAmelCase_[start] = lowerCAmelCase_[start], lowerCAmelCase_[i]
                backtrack(start + 1 )
                # ... and undo the swap (backtrack)
                lowerCAmelCase_[i], lowerCAmelCase_[start] = lowerCAmelCase_[start], lowerCAmelCase_[i]

    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in the backtracking permute function above
    # (fix: the original referenced the undefined names `permutea` and `res`)
    res = UpperCAmelCase__([1, 2, 3])
    print(res)
    doctest.testmod()
| 553 |
"""simple docstring"""
from __future__ import annotations
import math
class UpperCamelCase_ :
    """Max segment tree (1-indexed nodes) with lazy propagation for range
    assignment.

    ``update`` assigns ``val`` to every position in ``[a, b]``; ``query``
    returns the maximum over ``[a, b]``.  Pending assignments are stored in
    ``lazy``/``flag`` and pushed one level down on demand.

    Fixes over the previous version: the methods now carry the names the
    bodies themselves call (``build``/``update``/``query``/``left``/``right``
    — all defs previously shared one name), and the tree/lazy/flag writes
    actually target ``self`` (they were assigned to dead locals).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of node *idx*."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node *idx*."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        """Build the subtree of node *idx* over a[left_element-1 .. right_element-1]."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign *val* to every position in [a, b]; node *idx* covers [left_element, right_element]."""
        if self.flag[idx] is True:
            # apply the pending lazy value here and push it down to the children
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            # disjoint from the update range
            return True
        if left_element >= a and right_element <= b:
            # fully covered: set this node, defer the children via lazy flags
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Maximum over [a, b]; returns -inf for ranges disjoint from this node."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        left_max = self.query(self.left(idx), left_element, mid, a, b)
        right_max = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(left_max, right_max)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    # Demo: build a max segment tree over 15 values, run a few range
    # queries and lazy range assignments.
    # fix: the original bound every value to the same mangled name and then
    # instantiated an undefined `SegmentTree`; the class above is `UpperCamelCase_`.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = UpperCamelCase_(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 553 | 1 |
from collections import defaultdict
class lowerCAmelCase_ :
    """Count the number of ways to distribute N tasks among M persons.

    ``dp[mask][task_no]`` memoizes the number of ways to hand out tasks
    ``task_no..N`` when the set of already-busy persons is ``mask``.

    Fixes over the previous version: ``__init__`` had two parameters with
    the same name (a SyntaxError), state was stored in dead locals instead
    of on ``self``, both methods shared one name, and the recursion
    accumulator was never initialized.
    """

    def __init__(self, task_performed, total):
        # total no of tasks (N)
        self.total_tasks = total
        # DP table will have a dimension of (2^M)*N;
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting
        # all bits to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def _count_ways_until(self, mask, task_no):
        """Ways to finish the assignment from *task_no* on, given busy-person *mask*."""
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't use this task in the arrangement
        total_ways_util = self._count_ways_until(mask, task_no + 1)
        # now assign the task to every free person able to do it, and recurse
        # for the remaining tasks
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                total_ways_util += self._count_ways_until(mask | (1 << p), task_no + 1)
        # save the value
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def UpperCamelCase_(self, task_performed):
        """Entry point: index persons per task, then count assignments from task 1."""
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # fill the DP table; the final answer is stored in dp[0][1]
        return self._count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    # fix: the original bound both values to the same mangled name and then
    # referenced undefined identifiers; use the class defined above instead.
    print(
        lowerCAmelCase_(task_performed, total_tasks).UpperCamelCase_(
            task_performed
        )
    )
| 10 | """simple docstring"""
UpperCamelCase = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 473 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_lowercase : List[str] = logging.get_logger(__name__)
class a_ ( PretrainedConfig ):
    r"""Configuration for UPerNet semantic-segmentation models.

    Wraps a backbone config (default: a ResNet exposing four stages)
    together with the decode-head and auxiliary-head hyper-parameters.

    Fixes over the previous version: the base class is the imported
    ``PretrainedConfig`` (the old base name was undefined), all ``__init__``
    parameters get distinct names (they previously shared one name — a
    SyntaxError), values are stored on ``self`` instead of dead locals, and
    ``to_dict`` builds and returns a real ``output`` dict.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            # note: the module-level logger was mangled to `_lowercase`
            _lowercase.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING["resnet"](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            # allow a plain dict and rebuild the concrete backbone config from it
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 710 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Packages whose installed versions are validated when the library is imported.
_lowercase = [
    """python""",
    """tqdm""",
    """regex""",
    """requests""",
    """packaging""",
    """filelock""",
    """numpy""",
    """tokenizers""",
    """huggingface-hub""",
    """safetensors""",
    """accelerate""",
    """pyyaml""",
]
# fix: the loop previously iterated the undefined name
# `pkgs_to_check_at_runtime`; the list above is what it must walk.
for pkg in _lowercase:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCamelCase__ ( a , hint=None ):
    """Require that the installed version of package *a* (a key of ``deps``)
    satisfies its pinned requirement; *hint* is an optional message shown on
    mismatch.

    Fixes over the previous version: both parameters were named ``a``
    (a SyntaxError) and the body read ``pkg``, a name leaked from the
    module-level loop above.
    """
    require_version(deps[a] , hint )
| 427 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase__ (
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict for a Blenderbot forward pass, deriving any mask
    that was not supplied.

    Attention masks default to "1 where the token is not the pad token";
    head masks default to all-ones (materialized for API parity but not
    returned, matching the upstream helper).

    Fixes over the previous version: all parameters shared one name (a
    SyntaxError), and the computed decoder attention mask was discarded —
    the encoder mask was returned under both keys.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class _UpperCamelCase:
    """Fixture builder for the Flax Blenderbot tests: creates a tiny config
    plus matching encoder/decoder inputs, and provides the use-cache
    consistency checks used by the model test suite.

    Fixes over the previous version: ``__init__`` had every parameter bound
    to one name (a SyntaxError) and stored values in dead locals instead of
    on ``self``; the four helper methods all shared one name so only the
    last survived; unpacking targets were all identical.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return (config, inputs_dict) for a tiny Blenderbot, every sequence ending in EOS."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # force an EOS token (id 2) at the end of every sequence
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # cache behavior is exercised explicitly by the checks below
            use_cache=False,
        )
        # the inputs-dict helper defined above (originally prepare_blenderbot_inputs_dict)
        inputs_dict = UpperCAmelCase__(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a cache must match decoding in one shot."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with a padded (zero-extended) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
@require_flax
class _UpperCamelCase( unittest.TestCase ):
    """Standalone shape/behavior tests for the Flax Blenderbot heads.

    Fixes over the previous version: the vocab-size class attribute had a
    name-mangled ``__``-prefixed name while the bodies read
    ``self.vocab_size``; all four methods shared one name; unpacking
    targets and intermediate names were mangled into undefined references.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size) for a tiny Blenderbot."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """LM head logits have shape (batch, seq_len, vocab_size)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_lm_uneven_forward(self):
        """Encoder and decoder sequences of different lengths are handled."""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_shift_tokens_right(self):
        """shift_tokens_right keeps the shape, consumes one pad token, and starts with BOS (2)."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class _UpperCamelCase( __lowerCamelCase , unittest.TestCase , __lowerCamelCase ):
    # NOTE(review): machine-obfuscated block.  The mixin base classes were
    # renamed to the undefined `__lowerCamelCase`, class attributes to
    # `__SCREAMING_SNAKE_CASE`, and call arguments to `SCREAMING_SNAKE_CASE__`,
    # so the code is not runnable as written.  It looks like the common Flax
    # Blenderbot model test suite (cache tests, jitted encode/decode,
    # from_pretrained smoke tests) — TODO confirm against upstream.

    __SCREAMING_SNAKE_CASE : int = True
    # Model classes under test; empty tuples when Flax is unavailable.
    __SCREAMING_SNAKE_CASE : Any = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE : str = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def __lowerCAmelCase ( self : Tuple ):
        '''Create the shared model tester used by the test methods below.'''
        __a : List[Any] = FlaxBlenderbotModelTester(self )

    def __lowerCAmelCase ( self : Optional[int] ):
        '''Check that forward with use_cache matches the uncached forward.'''
        __a , __a : Any = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCAmelCase ( self : List[Any] ):
        '''Same cache check, but with an explicit decoder attention mask.'''
        __a , __a : Tuple = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCAmelCase ( self : List[str] ):
        '''Jitted encode must produce outputs with the same shapes as eager encode.'''
        __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                __a : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )

                @jax.jit
                def encode_jitted(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : List[Any] ):
                    return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )

                with self.subTest('JIT Enabled' ):
                    __a : Dict = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __a : Dict = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
                for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def __lowerCAmelCase ( self : Any ):
        '''Jitted decode (given precomputed encoder outputs) must match eager shapes.'''
        __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
                __a : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                __a : Optional[int] = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ):
                    return model.decode(
                        decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , )

                with self.subTest('JIT Enabled' ):
                    __a : List[Any] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __a : str = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
                for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def __lowerCAmelCase ( self : Union[str, Any] ):
        '''Smoke test: load the 400M checkpoint and run one forward pass.'''
        for model_class_name in self.all_model_classes:
            __a : List[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            __a : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id
            __a : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )

    @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
    @slow
    def __lowerCAmelCase ( self : List[str] ):
        '''Integration test: generate with the 3B checkpoint and compare against
        a fixed reference string.'''
        __a : int = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
        __a : Dict = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        __a : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=SCREAMING_SNAKE_CASE__ )
        __a : List[Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
        __a : Tuple = ['Sam']
        __a : int = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='jax' )
        __a : Any = model.generate(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        __a : Union[str, Any] = 'Sam is a great name. It means "sun" in Gaelic.'
        __a : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        assert generated_txt[0].strip() == tgt_text
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( A__ ):
    # NOTE(review): machine-obfuscated block.  Assignment targets were rewritten
    # to `lowerCamelCase__: <Type> = ...` (originally presumably attribute
    # assignments such as `self.eval_examples = ...` and plain locals), so the
    # bindings are broken as written.  The structure matches the Seq2SeqTrainer
    # subclass from the transformers seq2seq examples — TODO confirm upstream.

    def __init__( self : Tuple , *__a : Tuple , __a : Dict=None , __a : List[str]=None , **__a : Dict ):
        '''Store the evaluation examples and the post-processing callback on top
        of the base trainer initialisation.'''
        super().__init__(*__a , **__a )
        lowerCamelCase__: str = eval_examples
        lowerCamelCase__: Optional[int] = post_process_function

    def lowerCamelCase_ ( self : str , __a : Optional[Dataset] = None , __a : List[Any]=None , __a : Optional[List[str]] = None , __a : str = "eval" , **__a : Tuple , ):
        '''Run evaluation with generation, attach speed metrics, post-process
        predictions and compute task metrics on the main process.'''
        # Resolve generation kwargs, falling back to the trainer arguments.
        lowerCamelCase__: Tuple = gen_kwargs.copy()
        lowerCamelCase__: Union[str, Any] = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
        )
        lowerCamelCase__: Tuple = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
        )
        lowerCamelCase__: Optional[Any] = gen_kwargs
        lowerCamelCase__: List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
        lowerCamelCase__: Union[str, Any] = self.get_eval_dataloader(__a )
        lowerCamelCase__: Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        lowerCamelCase__: Optional[int] = self.compute_metrics
        lowerCamelCase__: Union[str, Any] = None
        lowerCamelCase__: Dict = time.time()
        lowerCamelCase__: Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowerCamelCase__: Any = eval_loop(
                __a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
        finally:
            # Always restore the metric callback, even if the loop raised.
            lowerCamelCase__: Any = compute_metrics
        lowerCamelCase__: int = self.args.eval_batch_size * self.args.world_size
        # Exclude JIT compilation time from the measured wall-clock window.
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowerCamelCase__: Tuple = self.post_process_function(__a , __a , __a )
            lowerCamelCase__: List[Any] = self.compute_metrics(__a )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    lowerCamelCase__: Dict = metrics.pop(__a )
            metrics.update(output.metrics )
        else:
            lowerCamelCase__: int = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(__a )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        lowerCamelCase__: List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __a )
        return metrics

    def lowerCamelCase_ ( self : str , __a : List[str] , __a : List[Any] , __a : Tuple=None , __a : str = "test" , **__a : Optional[int] ):
        '''Run prediction on a test set; mirrors the evaluation path above and
        returns a PredictionOutput with post-processed predictions and metrics.'''
        lowerCamelCase__: List[Any] = gen_kwargs.copy()
        lowerCamelCase__: Optional[Any] = self.get_test_dataloader(__a )
        # Temporarily disable metric computation, we will do it in the loop here.
        lowerCamelCase__: Any = self.compute_metrics
        lowerCamelCase__: Optional[int] = None
        lowerCamelCase__: int = time.time()
        lowerCamelCase__: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowerCamelCase__: List[str] = eval_loop(
                __a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
        finally:
            lowerCamelCase__: Any = compute_metrics
        lowerCamelCase__: Optional[int] = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        lowerCamelCase__: str = self.post_process_function(__a , __a , __a , """predict""" )
        lowerCamelCase__: str = self.compute_metrics(__a )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                lowerCamelCase__: Dict = metrics.pop(__a )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__a )
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_SCREAMING_SNAKE_CASE : Any = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round ``(height, width)`` to the nearest multiple of ``scale_factor`` that
    the latent model can handle.

    Each dimension is divided by ``scale_factor**2``, rounded *up* when there is
    a remainder, and then multiplied back by ``scale_factor`` — i.e. the result
    is ``ceil(dim / scale_factor**2) * scale_factor`` per dimension.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: downscaling factor of the latent space (default 8).

    Returns:
        Tuple ``(new_height, new_width)`` of adjusted dimensions.
    """
    # NOTE: the original block was obfuscated into invalid Python (all three
    # parameters shared one name and the computed values were never bound to
    # the names used below); this restores the intended definition, which the
    # pipeline below calls as `downscale_height_and_width(...)`.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up instead of truncating
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    """Convert a PIL image into a normalized float tensor for the pipeline.

    The image is resized to ``(w, h)`` with bicubic resampling, converted to
    RGB, scaled from ``[0, 255]`` into ``[-1, 1]``, transposed from HWC to CHW,
    and given a leading batch dimension.

    Args:
        pil_image: source ``PIL.Image.Image``.
        w: target width in pixels (default 512).
        h: target height in pixels (default 512).

    Returns:
        ``torch.FloatTensor`` of shape ``(1, 3, h, w)`` with values in [-1, 1].
    """
    # NOTE: the original block was obfuscated into invalid Python (duplicate
    # parameter names, intermediate results never bound); this restores the
    # intended definition used by the pipeline's __call__ below.
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1  # [0, 255] -> [-1, 1]
    arr = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
    return torch.from_numpy(arr).unsqueeze(0)  # add batch dimension
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    '''Image-to-image diffusion pipeline (Kandinsky 2.2 decoder stage, by the
    looks of the registered modules: unet + DDPM scheduler + movq VAE).

    NOTE(review): this block is machine-obfuscated — locals are assigned to
    `A__` while later lines read the original names (`image`, `latents`,
    `noise_pred`, ...), and most call arguments were replaced with the
    undefined `UpperCamelCase__`.  Not runnable as written; comments describe
    the apparent intent.
    '''

    def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
        super().__init__()
        # Register unet, scheduler and movq so they are tracked by the base pipeline.
        self.register_modules(
            unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
        # Spatial downscale factor of the movq latent space (2 per down block).
        A__ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
        # get the original timestep using init_timestep
        # Img2img: skip the first (1 - strength) fraction of the schedule.
        A__ : Tuple = min(int(num_inference_steps * strength ) , UpperCamelCase__ )
        A__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
        A__ : int = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
        '''Encode the input image to movq latents (unless already 4-channel
        latents) and noise them to the starting timestep.'''
        if not isinstance(UpperCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase__ )}" )
        A__ : List[str] = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
        A__ : Dict = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Input is already a latent tensor; use it directly.
            A__ : int = image
        else:
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
                raise ValueError(
                    F"You have passed a list of generators of length {len(UpperCamelCase__ )}, but requested an effective batch"
                    F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
            elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                # One generator per batch element: encode images individually.
                A__ : Dict = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase__ )
                ]
                A__ : int = torch.cat(UpperCamelCase__ , dim=0 )
            else:
                A__ : str = self.movq.encode(UpperCamelCase__ ).latent_dist.sample(UpperCamelCase__ )
            A__ : int = self.movq.config.scaling_factor * init_latents
        A__ : List[Any] = torch.cat([init_latents] , dim=0 )
        A__ : str = init_latents.shape
        A__ : str = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
        # get latents
        A__ : str = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : str = init_latents
        return latents

    def __snake_case ( self , UpperCamelCase__=0 ):
        '''Offload unet and movq to CPU via accelerate to save GPU memory.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        A__ : Dict = torch.device(F"cuda:{gpu_id}" )
        A__ : List[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase__ , UpperCamelCase__ )

    def __snake_case ( self , UpperCamelCase__=0 ):
        '''Model-level CPU offload with hooks (requires accelerate >= 0.17).'''
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        A__ : Any = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase__ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        A__ : Tuple = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            A__ : Tuple = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
        # We'll offload the last model manually.
        A__ : int = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __snake_case ( self ):
        '''Device on which the unet actually executes (accounts for offload hooks).'''
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCamelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCamelCase__ )
    def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 100 , UpperCamelCase__ = 4.0 , UpperCamelCase__ = 0.3 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ):
        '''Run img2img denoising conditioned on image embeddings and return the
        decoded images (PIL / numpy / tensor depending on output_type).'''
        A__ : Any = self._execution_device
        A__ : str = guidance_scale > 1.0
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            A__ : Tuple = torch.cat(UpperCamelCase__ , dim=0 )
        A__ : Tuple = image_embeds.shape[0]
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            A__ : List[Any] = torch.cat(UpperCamelCase__ , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image and stack negative+positive.
            A__ : List[str] = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
            A__ : int = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
            A__ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
        if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            A__ : Optional[Any] = [image]
        if not all(isinstance(UpperCamelCase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F"Input is in incorrect format: {[type(UpperCamelCase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
        # Preprocess every input image and encode to movq latents.
        A__ : str = torch.cat([prepare_image(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for i in image] , dim=0 )
        A__ : List[Any] = image.to(dtype=image_embeds.dtype , device=UpperCamelCase__ )
        A__ : List[str] = self.movq.encode(UpperCamelCase__ )['''latents''']
        A__ : Dict = latents.repeat_interleave(UpperCamelCase__ , dim=0 )
        self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
        A__ : Optional[int] = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        A__ : Any = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
        A__ : List[str] = self.prepare_latents(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ )
        for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            A__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A__ : List[str] = {'''image_embeds''': image_embeds}
            A__ : Optional[int] = self.unet(
                sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise / variance, then combine the unconditional
                # and text-conditional noise with the guidance scale.
                A__ : Any = noise_pred.split(latents.shape[1] , dim=1 )
                A__ : Optional[int] = noise_pred.chunk(2 )
                A__ : List[str] = variance_pred.chunk(2 )
                A__ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                A__ : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not expect a learned variance; keep noise only.
                A__ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            A__ : str = self.scheduler.step(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0]
        # post-processing
        A__ : List[Any] = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # [-1, 1] -> [0, 1], then to HWC numpy for PIL conversion.
            A__ : Any = image * 0.5 + 0.5
            A__ : List[str] = image.clamp(0 , 1 )
            A__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            A__ : Tuple = self.numpy_to_pil(UpperCamelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCamelCase__ )
"""
Project Euler problem 551: "Sum of digits sequence".

Let a(0) = 1 and, for n >= 1, a(n) = a(n-1) + digit_sum(a(n-1)).
``solution(n)`` returns the n-th term of this sequence, counting a(0) as the
first term, using a memoised "jump" technique: once a run of terms has been
computed for a given high-digit sum and low-digit value, the total increment
and number of terms of that run are cached and replayed instead of recomputed.

NOTE(review): the original block was obfuscated into invalid Python — all four
functions shared the name ``SCREAMING_SNAKE_CASE`` with duplicate parameter
names, while call sites referenced ``next_term``/``compute``/``add``/
``solution`` and the globals ``ks``/``base``/``memo``.  The names below restore
the intended, mutually consistent definitions.

Digit arrays are little-endian throughout: ``digits[0]`` is the least
significant decimal digit.
"""

# k values for which jumps are attempted, from 10**ks[0] up to 10**ks[-1].
ks = range(2, 20 + 1)
# base[k] == 10**k, precomputed for digit <-> value conversions.
base = [10**k for k in range(ks[-1] + 1)]
# memo[digitsum(b)][c] -> cached jumps (diff, dn, k), sorted by dn, where a
# term is written as a(i) = b * 10**k + c.
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Advance ``a_i`` (in place) from term ``i`` toward term ``n``.

    The current term is viewed as ``a(i) = b * 10**k + c``.  If a jump for
    ``(digitsum(b), c)`` is cached, replay the largest one that fits; otherwise
    recurse with smaller ``k`` or fall back to :func:`compute`, then cache the
    resulting jump.

    Returns:
        ``(diff, dn)`` — the total amount added to the term and the number of
        terms advanced.
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))  # digitsum of the high part b
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))  # low part value

    diff, dn = 0, 0
    max_dn = n - i  # never advance past term n

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                # write the new low part back into the digit array
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Advance ``a_i`` term by term until term ``n`` or until the low ``k``
    digits overflow (a carry escapes into the high part).

    Returns ``(diff, terms_advanced)``.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b  # digit sum of the whole current term
        diff += addend
        ds_c = 0
        # add the digit sum into the low k digits, tracking the new ds_c
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            # carry escaped the low part; the caller must re-derive ds_b
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits: list[int], k: int, addend: int) -> None:
    """Add ``addend`` into the digit array ``digits`` starting at index ``k``,
    propagating carries and appending new high digits as needed."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    # append any remaining carry as new most-significant digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence (a(0) = 1 counts as term 1).

    >>> solution(1)
    1
    >>> solution(10)
    62
    """
    digits = [1]  # little-endian digits of the current term, a(0) = 1
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # assemble the integer from its little-endian digits
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"""{solution() = }""")
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # NOTE(review): machine-obfuscated fast-test class for StableUnCLIPPipeline.
    # Locals are assigned to `SCREAMING_SNAKE_CASE__:` while later lines read
    # the original names (`embedder_hidden_size`, `prior_tokenizer`, ...), so
    # the bindings are broken as written.  Comments describe apparent intent.

    __a = StableUnCLIPPipeline
    __a = TEXT_TO_IMAGE_PARAMS
    __a = TEXT_TO_IMAGE_BATCH_PARAMS
    __a = TEXT_TO_IMAGE_IMAGE_PARAMS
    __a = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __a = False

    def UpperCamelCase_ ( self ) -> Dict:
        '''Build the full set of tiny dummy components (prior, image-noising and
        denoising stages) needed to instantiate a StableUnCLIPPipeline.'''
        SCREAMING_SNAKE_CASE__: List[str]= 32
        SCREAMING_SNAKE_CASE__: Tuple= embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase , num_layers=1 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[Any]= DDPMScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=lowerCAmelCase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
        # regular denoising components
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Any= DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= AutoencoderKL()
        SCREAMING_SNAKE_CASE__: str= {
            # prior components
            '''prior_tokenizer''': prior_tokenizer,
            '''prior_text_encoder''': prior_text_encoder,
            '''prior''': prior,
            '''prior_scheduler''': prior_scheduler,
            # image noising components
            '''image_normalizer''': image_normalizer,
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
        }
        return components

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Dict:
        '''Build deterministic dummy pipeline inputs (seeded generator, prompt,
        minimal inference steps) for the given device/seed.'''
        if str(lowerCAmelCase ).startswith('''mps''' ):
            # mps does not support device-bound generators
            SCREAMING_SNAKE_CASE__: int= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: List[str]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''prior_num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def UpperCamelCase_ ( self ) -> Optional[int]:
        '''Attention slicing should only be compared exactly on CPU.'''
        SCREAMING_SNAKE_CASE__: List[str]= torch_device == '''cpu'''
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        '''Batched vs single inference equivalence; exact only on cpu/mps.'''
        SCREAMING_SNAKE_CASE__: str= torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    # NOTE(review): machine-obfuscated slow/GPU integration tests for
    # StableUnCLIPPipeline; name bindings are broken as written (locals
    # assigned to `SCREAMING_SNAKE_CASE__:`).  Comments describe intent.

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self ) -> int:
        '''Generate an image from the stable-unclip-2-1-l checkpoint and compare
        it against a stored reference image.'''
        SCREAMING_SNAKE_CASE__: List[Any]= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        SCREAMING_SNAKE_CASE__: str= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Any= pipe('''anime turle''' , generator=lowerCAmelCase , output_type='''np''' )
        SCREAMING_SNAKE_CASE__: Optional[Any]= output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> List[Any]:
        '''Run the pipeline with offloading enabled and assert peak GPU memory
        stays under 7 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        SCREAMING_SNAKE_CASE__: Tuple= StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE__: Dict= pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
            '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
        SCREAMING_SNAKE_CASE__: str= torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
# --- chunk boundary (extraction artifact) ---
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase__ ( BaseImageProcessor ):
    r"""
    CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and normalization, producing `pixel_values`.

    Bug fixes vs. the previous revision: the base class name was undefined,
    every signature repeated one parameter name (a SyntaxError), and all five
    methods shared one name even though `preprocess` calls `self.resize`,
    `self.center_crop`, `self.rescale` and `self.normalize`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PILImageResampling.BICUBIC,
        do_center_crop = True,
        crop_size = None,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        do_convert_rgb = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # `size` describes the shortest edge, so it must not be squared.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so that the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Bare `resize` resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        do_convert_rgb = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# --- chunk boundary (extraction artifact) ---
import logging
import os
from .state import PartialState
class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
    """Logger adapter that is aware of multi-process execution.

    Records are emitted on the main process only by default; with
    ``in_order=True`` every process logs in rank order instead.

    Bug fixes vs. the previous revision: the static helper was defined under
    a name different from the one ``log`` calls (``_should_log``), its
    parameter never matched the name read in the body, and ``log`` itself was
    misnamed so the stdlib ``LoggerAdapter`` machinery never dispatched to it.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Emit when logging everywhere, or when this is the main process.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger, filtering by process.

        Extra kwargs: ``main_process_only`` (default True) and ``in_order``
        (default False); both are popped before reaching the base logger.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""" , True )
        in_order = kwargs.pop("""in_order""" , False )
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    # Keep ranks in lockstep so output appears in order.
                    state.wait_for_everyone()
def _UpperCamelCase (name: str , log_level: str = None ):
    """Return a multi-process-aware logger adapter for *name*.

    If *log_level* is not given, it falls back to the ``ACCELERATE_LOG_LEVEL``
    environment variable; when resolved, it is applied (uppercased) to both
    the named logger and the root logger.

    Bug fixes vs. the previous revision: both parameters shared one name
    (a SyntaxError), locals were assigned to a throwaway name while the body
    read ``log_level``/``logger``, and the return referenced a class name
    that does not exist in this module.
    """
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    # __SCREAMING_SNAKE_CASE is the MultiProcessAdapter defined above.
    return __SCREAMING_SNAKE_CASE(logger , {} )
# --- chunk boundary (extraction artifact) ---
from typing import Any
def _UpperCamelCase (a__ :list ):
"""simple docstring"""
if not input_list:
return []
UpperCamelCase__ = [input_list.count(a__ ) for value in input_list]
UpperCamelCase__ = max(a__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(a__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
# --- chunk boundary (extraction artifact) ---
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two 2-D points.

    Bug fix vs. the previous revision: both parameters shared one name
    (a SyntaxError) and the body read an undefined name.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw a Sierpinski triangle with the module-level turtle pen.

    Draws the outline of the current triangle, then recurses into the three
    corner sub-triangles until *depth* reaches 0.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)


# Backward-compat alias: the previous revision exposed the last definition
# under this obfuscated name.
lowercase__ = triangle

if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
# --- chunk boundary (extraction artifact) ---
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    """A binary tree node holding an int payload and left/right children."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


# Backward-compat alias for the previous (obfuscated) class name.
_a = TreeNode


def build_tree():
    """Interactively build a tree level by level; 'n' stops input."""
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    """Print node, then left subtree, then right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Print left subtree, then node, then right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Print left subtree, then right subtree, then node."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node: TreeNode) -> None:
    """Print nodes breadth-first on a single line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Print nodes breadth-first, one line per tree level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stacka_rev = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stacka_rev.append(n)
    while stacka_rev:  # pop up from stack2 will be the post order
        print(stacka_rev.pop().data, end=',')


def prompt(s: str = "", width=50, char="*") -> str:
    """Return *s* centered in a banner of *char* of the given *width*."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


# Backward-compat alias: the previous revision exposed the last definition
# under this obfuscated name.
lowercase__ = prompt

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
# --- chunk boundary (extraction artifact) ---
"""simple docstring"""
def _lowerCamelCase ( lowerCamelCase__ : Any ):
lowercase__ : Dict = []
lowercase__ : Optional[int] = []
lowercase__ : Union[str, Any] = {
"""^""": 3,
"""*""": 2,
"""/""": 2,
"""%""": 2,
"""+""": 1,
"""-""": 1,
} # Priority of each operator
lowercase__ : Optional[int] = len(lowerCamelCase__ ) if (len(lowerCamelCase__ ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(lowerCamelCase__ ) , """Postfix""".center(lowerCamelCase__ ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(lowerCamelCase__ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(lowerCamelCase__ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(lowerCamelCase__ ) == 0:
stack.append(lowerCamelCase__ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(lowerCamelCase__ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(lowerCamelCase__ ) # push x to stack
print(
x.center(8 ) , ("""""".join(lowerCamelCase__ )).ljust(lowerCamelCase__ ) , ("""""".join(lowerCamelCase__ )).ljust(lowerCamelCase__ ) , sep=""" | """ , ) # Output in tabular format
while len(lowerCamelCase__ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(lowerCamelCase__ )).ljust(lowerCamelCase__ ) , ("""""".join(lowerCamelCase__ )).ljust(lowerCamelCase__ ) , sep=""" | """ , ) # Output in tabular format
return "".join(lowerCamelCase__ ) # return Postfix as str
def _lowerCamelCase ( lowerCamelCase__ : List[str] ):
lowercase__ : Optional[int] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(lowerCamelCase__ ) ):
if infix[i] == "(":
lowercase__ : Tuple = """)""" # change "(" to ")"
elif infix[i] == ")":
lowercase__ : Union[str, Any] = """(""" # change ")" to "("
return (infix_2_postfix("""""".join(lowerCamelCase__ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
__snake_case = input('\nEnter an Infix Equation = ') # Input an Infix equation
__snake_case = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)') | 128 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( PipelineTool ):
    """Text summarization tool backed by a BART-large-CNN-SamSum checkpoint.

    Bug fixes vs. the previous revision: the base class name was undefined,
    the class attributes all shadowed one name, the three hook methods shared
    one name so PipelineTool could never dispatch to them, and several flags
    referenced an undefined variable instead of True.
    """

    default_checkpoint = """philschmid/bart-large-cnn-samsum"""
    description = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    name = """summarizer"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["""text"""]
    outputs = ["""text"""]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's max length."""
        return self.pre_processor(text, return_tensors="""pt""", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Decode generated ids back to a clean summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , a__):
A__ = TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''')
if len(a__) != 0:
A__ = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(a__) != cols:
raise error
for value in row:
if not isinstance(a__ , (int, float)):
raise error
A__ = rows
else:
A__ = []
def snake_case_ ( self):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def snake_case_ ( self):
return len(self.rows)
@property
def snake_case_ ( self):
return len(self.rows[0])
@property
def snake_case_ ( self):
return (self.num_rows, self.num_columns)
@property
def snake_case_ ( self):
return self.order[0] == self.order[1]
def snake_case_ ( self):
A__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(a__)
def snake_case_ ( self):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def snake_case_ ( self):
return bool(self.determinant())
def snake_case_ ( self , a__ , a__):
A__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(a__).determinant()
def snake_case_ ( self , a__ , a__):
if (row + column) % 2 == 0:
return self.get_minor(a__ , a__)
return -1 * self.get_minor(a__ , a__)
def snake_case_ ( self):
return Matrix(
[
[self.get_minor(a__ , a__) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def snake_case_ ( self):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def snake_case_ ( self):
A__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(a__)
def snake_case_ ( self):
A__ = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''')
return self.adjugate() * (1 / determinant)
def __repr__( self):
return str(self.rows)
def __str__( self):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(a__) for value in row]) + '''.]'''
for row in self.rows
])
+ "]"
)
def snake_case_ ( self , a__ , a__ = None):
A__ = TypeError('''Row must be a list containing all ints and/or floats''')
if not isinstance(a__ , a__):
raise type_error
for value in row:
if not isinstance(a__ , (int, float)):
raise type_error
if len(a__) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''')
if position is None:
self.rows.append(a__)
else:
A__ = self.rows[0:position] + [row] + self.rows[position:]
def snake_case_ ( self , a__ , a__ = None):
A__ = TypeError(
'''Column must be a list containing all ints and/or floats''')
if not isinstance(a__ , a__):
raise type_error
for value in column:
if not isinstance(a__ , (int, float)):
raise type_error
if len(a__) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''')
if position is None:
A__ = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
A__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self , a__):
if not isinstance(a__ , a__):
return NotImplemented
return self.rows == other.rows
def __ne__( self , a__):
return not self == other
def __neg__( self):
return self * -1
def __add__( self , a__):
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self , a__):
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self , a__):
if isinstance(a__ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(a__ , a__):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''')
return Matrix(
[
[Matrix.dot_product(a__ , a__) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''')
def __pow__( self , a__):
if not isinstance(a__ , a__):
raise TypeError('''A Matrix can only be raised to the power of an int''')
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''')
A__ = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def snake_case_ ( cls , a__ , a__):
return sum(row[i] * column[i] for i in range(len(a__)))
if __name__ == "__main__":
import doctest
doctest.testmod()
# --- chunk boundary (extraction artifact) ---
def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of *number*."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler 20: sum of the digits of num! (default 100!).

    Bug fix vs. the previous revision: all three functions shared one
    obfuscated name, so the calls to ``factorial`` and ``split_and_add``
    below (and ``solution`` in the entry point) were undefined.
    """
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


# Backward-compat alias: the previous revision exposed the last definition
# under this obfuscated name.
lowerCAmelCase__ = solution

if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
# --- chunk boundary (extraction artifact) ---
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# Bug fix: these constants were all assigned to one reused name while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_INIT_CONFIGURATION and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Checkpoint name -> remote vocab / tokenizer file URLs.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/electra-small-generator': (
            'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
        ),
        'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
        'google/electra-large-generator': (
            'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
        ),
        'google/electra-small-discriminator': (
            'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
        ),
        'google/electra-base-discriminator': (
            'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
        ),
        'google/electra-large-discriminator': (
            'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'google/electra-small-generator': (
            'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-base-generator': (
            'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-large-generator': (
            'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-small-discriminator': (
            'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
        ),
        'google/electra-base-discriminator': (
            'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
        ),
        'google/electra-large-discriminator': (
            'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
        ),
    },
}

# Checkpoint name -> maximum input length (positional embeddings size).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/electra-small-generator': 512,
    'google/electra-base-generator': 512,
    'google/electra-large-generator': 512,
    'google/electra-small-discriminator': 512,
    'google/electra-base-discriminator': 512,
    'google/electra-large-discriminator': 512,
}

# Checkpoint name -> tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    'google/electra-small-generator': {'do_lower_case': True},
    'google/electra-base-generator': {'do_lower_case': True},
    'google/electra-large-generator': {'do_lower_case': True},
    'google/electra-small-discriminator': {'do_lower_case': True},
    'google/electra-base-discriminator': {'do_lower_case': True},
    'google/electra-large-discriminator': {'do_lower_case': True},
}
class __UpperCAmelCase( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) Electra tokenizer, a WordPiece tokenizer.

    Bug fixes vs. the previous revision: the base class name was undefined,
    the class attributes all shadowed one name, ``__init__`` repeated one
    parameter name for every argument (a SyntaxError), the normalizer-state
    updates were assigned to a throwaway local, and the three public methods
    shared one name so only the last survived.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the
        # arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Return `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """Return 0s for the first segment and 1s for the optional second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer's vocabulary files; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed by _LazyModule so heavy
# dependencies (torch) are only imported on first attribute access.
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is absent: the modeling objects simply are not exported.
    pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports.
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point of the `accelerate` CLI: register all subcommands, parse the
    command line, and dispatch to the selected subcommand's handler."""
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    # Each registered subcommand stores its handler in `func`; no `func` means
    # the user ran `accelerate` with no (valid) subcommand.
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
def solution(min_total: int = 10**12) -> int:
    """
    Project Euler 100 ("Arranged probability"): find the number of blue discs in
    the first box with more than `min_total` discs such that drawing two blue
    discs at random has exactly probability 1/2.

    Successive solutions of the underlying Pell-like equation are generated by a
    linear recurrence on the convergents; `denominator` tracks the blue-disc
    convergent and `numerator` the total-disc convergent.

    >>> solution(5)
    15
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    # Iterate until the total number of discs exceeds min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f'''{solution() = }''')
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    # Serving extras are missing: provide inert stand-ins so this module still
    # imports; ServeCommand.__init__ checks the flag and raises a helpful error.
    BaseModel = object

    def Body(*x, **y):
        """No-op replacement for fastapi.Body when the serving extras are absent."""
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided
    command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information (response model of the `/` route)."""

    # Arbitrary mapping of config attributes, filled with vars(model.config).
    infos: dict
class ServeTokenizeResult(BaseModel):
    """Tokenize result model (response model of the `/tokenize` route)."""

    tokens: List[str]
    # Only populated when the request asked for integer ids (return_ids=True).
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """Detokenize result model (response model of the `/detokenize` route)."""

    # Both fields are filled by ServeCommand.detokenize (model is currently "").
    model: str
    text: str
class ServeForwardResult(BaseModel):
    """Forward result model (response model of the `/forward` route)."""

    output: Any
    # Optional in practice: only set for the empty-input short-circuit.
    attention: Any
class ServeCommand(BaseTransformersCLICommand):
    """CLI command that exposes a `Pipeline` through a REST API (FastAPI served by uvicorn)."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers CLI.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on." )
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to." )
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers" )
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model." )
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model." )
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use." )
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                "Please install transformers with [serving]: pip install \"transformers[serving]\"."
                "Or install FastAPI and uvicorn separately." )
        else:
            logger.info(f"Serving model over {host}:{port}" )
            # One route per public operation; all responses serialized as JSON.
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )

    def run(self):
        """Block and serve the FastAPI app (``run`` here resolves to uvicorn.run)."""
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        """GET / — expose the underlying model's configuration."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        POST /tokenize — tokenize the provided input.

        - **text_input**: string to tokenize
        - **return_ids**: whether to also convert tokens to their integer ids
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        POST /detokenize — decode the provided token ids back to readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        POST /forward — run the pipeline on the provided inputs.
        """
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector over a grayscale image."""

    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, restricted here to {0.04, 0.06}
        window_size : side length of the neighbourhood considered per pixel

        Raises ValueError for any other k.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """
        Return (annotated RGB image, corner list) for the grayscale image at
        `img_path`. Each corner entry is [x, y, response].
        """
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # Use the validated constant from the constructor instead of a
        # hard-coded 0.04 (previously self.k was silently ignored).
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
UpperCAmelCase_ : Dict = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def UpperCamelCase ( _A : Dict )-> Optional[Any]:
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
UpperCAmelCase_ : str = parser.parse_args()
if args.check_lib:
UpperCAmelCase_ : int = importlib.import_module("transformers")
UpperCAmelCase_ : Tuple = Path(transformers_module.__file__).parent
else:
UpperCAmelCase_ : Optional[int] = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 491 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCamelCase(Dataset):
    # NOTE(review): upstream (distillation/lm_seqs_dataset.py) names this class
    # LmSeqsDataset — confirm before renaming, nothing in this chunk references it.
    """Wraps language-modeling sequences for distillation.

    Each sample is a (token_ids, length) pair; sequences are cleaned on
    construction (split overlong, drop too-short, drop unknown-heavy).
    """

    def __init__(self, params, data):
        """
        params: namespace with max_model_input_size, mlm, special_tok_ids, is_master
        data: list of 1-D int arrays (token id sequences)
        """
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity checks: lengths array is consistent with the sequences."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into chunks, re-adding
        the start/end special tokens on each chunk."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"""Splitting {sum(indices)} too long sequences.""")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the cls/sep tokens on every chunk.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")

    def remove_unknown_sequences(self):
        """Drop sequences where >= 50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")

    def print_statistics(self):
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"""{len(self)} sequences""")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate a batch of (token_ids, length) pairs: pad to the batch max and
        convert to torch tensors. Returns (token_ids [bs, max_len], lengths [bs])."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Falls back to the module-level `global_rng` when no generator is supplied;
    `name` is accepted for API compatibility and unused.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters and input builders shared by the
    WhisperFeatureExtractor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch spans [min, max].
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Kwargs dict to instantiate a WhisperFeatureExtractor under test."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of float speech inputs (lists, or np arrays if `numpify`)."""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """unittest suite for WhisperFeatureExtractor; shared checks come from the mixin."""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        # Padding must downcast double-precision inputs to float32.
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Model id -> URL of its configuration file on the HuggingFace Hub.
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder (ViT-like tower)."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the `vision_config` sub-dict when the
        checkpoint is a composite InstructBlipConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (BERT-like module with
    periodic cross-attention into the vision encoder)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the `qformer_config` sub-dict when the
        checkpoint is a composite InstructBlipConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together the vision encoder, the Q-Former
    and the language model of an InstructBLIP checkpoint."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # The language model backbone is resolved dynamically; defaults to OPT.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from the three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
def apply_table(inp: str, table: list[int]) -> str:
    """Permute the bit-string `inp` according to `table` (1-indexed positions)."""
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data: str) -> str:
    """Rotate the bit-string one position to the left."""
    return data[1:] + data[0]
def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings, returned as a bit-string."""
    res = ''''''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data: str) -> str:
    """Look up the 4-bit string `data` in S-box `s`.

    Row is selected by the outer bits (first and last), column by the middle
    two bits; the entry is returned as a binary string without the 0b prefix.
    """
    row = int('''0b''' + data[0] + data[-1], 2)
    col = int('''0b''' + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, sa, sb, key, message):
    """One Feistel round of simplified DES.

    Args:
        expansion: expansion/permutation table applied to the right half.
        sa: S-box S0.
        sb: S-box S1.
        key: round key (8-bit string).
        message: 8-bit input block.

    Returns:
        The 8-bit output block (new left half + unchanged right half).
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    # S-box outputs may be 1 char; left-pad to 2 bits.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    # p4_table is expected as a module-level permutation (script section) — TODO confirm
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # Permutation tables and S-boxes of the S-DES specification.
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation: derive the two round keys from the 10-bit key.
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption: IP, round(key1), swap halves, round(key2), inverse IP.
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: same structure with the round keys in reverse order.
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 460 |
"""DistilBERT module init: declares the lazy import structure for all backends."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map of submodule name -> public symbols; consumed by _LazyModule below so that
# heavy backends (torch/tf/flax) are only imported on first attribute access.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the optional `tokenizers` package.
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module handles them.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this package so submodules are only
    # imported when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 460 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    """Builds tiny GPT-NeoX configs/inputs and runs shape checks for each model head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Reserve the last vocab id as the padding token.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Build a small GPTNeoXConfig from the tester's hyperparameters."""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, but with the config in decoder mode."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Verify that cached (past_key_values) decoding matches full-sequence decoding."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry points for the GPT-NeoX model family."""

    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """Check RoPE scaling changes outputs where expected, and only there."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation against a pinned reference string."""

    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        # Exercise both the checkpointed and non-checkpointed forward paths.
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 450 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer of `model` for quantization checks.

    GPT-2 exposes its MLP input projection as `c_fc`; BLOOM-style models use
    `dense_4h_to_h` — presumably other tested architectures follow the BLOOM
    layout; verify against the models under test.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear module with a low-rank (LoRA-style) trainable adapter.

        The wrapped module is kept frozen-as-is; the adapter is a rank-`rank`
        bottleneck whose output projection is zero-initialized, so at creation
        time the layer behaves exactly like the wrapped module.
        """

        def __init__(self, module: "nn.Module", rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            # Zero-init the output projection so the adapter starts as a no-op.
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """Shared constants and tokenizer setup for the 4-bit quantization tests.

    Uses a >1B-parameter model (bloom-1b7) because quantization effects may
    not show up on smaller checkpoints.
    """

    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574  # fp16 vs 4-bit memory ratio
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    """Core behaviour of 4-bit loaded causal-LM models (memory, dtypes, generation)."""

    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """The quantization config must round-trip through all serializers."""
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """4-bit weights should shrink memory by the expected ratio."""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        """Every quantizable linear layer must hold packed uint8 parameters."""
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        """Greedy generation from the 4-bit model must match a known-good output."""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """Loading via an explicit BitsAndBytesConfig must behave the same."""
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        """Saving a 4-bit model is unsupported and must raise."""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        """Passing both a quantization config and load_in_4bit must raise."""
        config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Casting/moving a 4-bit model must raise; the fp16 model stays castable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """Modules listed in _keep_in_fp32_modules must stay in float32."""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    """4-bit loading for T5-family models, with and without _keep_in_fp32_modules."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        """Loading must work even when _keep_in_fp32_modules is disabled."""
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # Restore the class attribute for other tests.
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        """Default loading path: decoder projections must be bnb Linear4bit."""
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    """4-bit loading across the different auto-model head classes."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        """Backbone weights are quantized; task heads stay as plain nn.Parameter."""
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    """4-bit model loading through the high-level `pipeline` API."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    """4-bit loading with a balanced device map across two GPUs."""

    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    """Gradients must flow through LoRA adapters attached to a frozen 4-bit model."""

    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        # Adapter training over 4-bit weights requires bitsandbytes >= 0.37.0.
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class UpperCAmelCase_ ( A ):
    """Variant of the 8-bit test parameterized for GPT-2 XL.

    NOTE(review): both class attributes share the mangled name ``a__`` so the
    second assignment overrides the first; upstream these are presumably
    ``model_name`` and ``EXPECTED_RELATIVE_DIFFERENCE``.  The rest of the
    class body is not visible in this chunk.
    """
    a__ = '''gpt2-xl'''
    a__ = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 450 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( _UpperCAmelCase ):
    """Test-suite for ``DEISMultistepScheduler`` (diffusers).

    NOTE(review): this file is machine-obfuscated.  Every method below is
    named ``_UpperCAmelCase``, so under normal attribute rules only the last
    definition survives and most of these tests are unreachable.  Several
    signatures reuse ``UpperCAmelCase`` both positionally and as ``**kwargs``
    (a SyntaxError as written).  Many ``_lowercase = ...`` statements bind a
    throwaway local where the original assigned a name (``scheduler``,
    ``sample``, ``output`` ...) that later lines still reference, and several
    call sites pass the mangled placeholder where distinct arguments (e.g.
    ``True``/``False``) were intended.  Compare against upstream
    ``tests/schedulers/test_scheduler_deis.py`` before trusting anything here.
    """

    # NOTE(review): referenced below as ``self.scheduler_classes`` and
    # ``self.forward_default_kwargs``; both assignments use the same mangled
    # name, so the second shadows the first.
    lowerCAmelCase__ = (DEISMultistepScheduler,)
    lowerCAmelCase__ = (('num_inference_steps', 25),)

    def _UpperCAmelCase ( self , **UpperCAmelCase ):
        """Return the default scheduler config dict, updated with overrides.

        NOTE(review): the dict is bound to a throwaway local while the
        following lines reference an undefined ``config``.
        """
        _lowercase = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """solver_order""": 2,
        }
        config.update(**UpperCAmelCase )
        return config

    def _UpperCAmelCase ( self , UpperCAmelCase=0 , **UpperCAmelCase ):
        """Check step() outputs survive a save_config/from_pretrained round trip."""
        _lowercase = dict(self.forward_default_kwargs )
        _lowercase = kwargs.pop("""num_inference_steps""" , UpperCAmelCase )
        _lowercase = self.dummy_sample
        _lowercase = 0.1 * sample
        _lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            _lowercase = self.get_scheduler_config(**UpperCAmelCase )
            _lowercase = scheduler_class(**UpperCAmelCase )
            scheduler.set_timesteps(UpperCAmelCase )
            # copy over dummy past residuals
            _lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase )
                _lowercase = scheduler_class.from_pretrained(UpperCAmelCase )
                new_scheduler.set_timesteps(UpperCAmelCase )
                # copy over dummy past residuals
                _lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            _lowercase , _lowercase = sample, sample
            for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
                _lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
                _lowercase = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
                # NOTE(review): ``output``/``new_output`` are undefined locals here.
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def _UpperCAmelCase ( self ):
        """Intentionally a no-op (overrides a common test that does not apply)."""
        pass

    def _UpperCAmelCase ( self , UpperCAmelCase=0 , **UpperCAmelCase ):
        """Check outputs match after reloading the scheduler from a saved config."""
        _lowercase = dict(self.forward_default_kwargs )
        _lowercase = kwargs.pop("""num_inference_steps""" , UpperCAmelCase )
        _lowercase = self.dummy_sample
        _lowercase = 0.1 * sample
        _lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            _lowercase = self.get_scheduler_config()
            _lowercase = scheduler_class(**UpperCAmelCase )
            scheduler.set_timesteps(UpperCAmelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            _lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase )
                _lowercase = scheduler_class.from_pretrained(UpperCAmelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCAmelCase )
                # copy over dummy past residual (must be after setting timesteps)
                _lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
            _lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            _lowercase = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def _UpperCAmelCase ( self , UpperCAmelCase=None , **UpperCAmelCase ):
        """Run a full 10-step denoising loop and return the final sample.

        NOTE(review): the ``scheduler is None`` guard is ineffective as
        mangled — the scheduler is unconditionally rebuilt right after it.
        """
        if scheduler is None:
            _lowercase = self.scheduler_classes[0]
            _lowercase = self.get_scheduler_config(**UpperCAmelCase )
            _lowercase = scheduler_class(**UpperCAmelCase )
        _lowercase = self.scheduler_classes[0]
        _lowercase = self.get_scheduler_config(**UpperCAmelCase )
        _lowercase = scheduler_class(**UpperCAmelCase )
        _lowercase = 10
        _lowercase = self.dummy_model()
        _lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            _lowercase = model(UpperCAmelCase , UpperCAmelCase )
            _lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        return sample

    def _UpperCAmelCase ( self ):
        """Check the shapes of two consecutive ``step()`` outputs."""
        _lowercase = dict(self.forward_default_kwargs )
        _lowercase = kwargs.pop("""num_inference_steps""" , UpperCAmelCase )
        for scheduler_class in self.scheduler_classes:
            _lowercase = self.get_scheduler_config()
            _lowercase = scheduler_class(**UpperCAmelCase )
            _lowercase = self.dummy_sample
            _lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCAmelCase , """set_timesteps""" ):
                scheduler.set_timesteps(UpperCAmelCase )
            elif num_inference_steps is not None and not hasattr(UpperCAmelCase , """set_timesteps""" ):
                _lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            _lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
            _lowercase = dummy_past_residuals[: scheduler.config.solver_order]
            _lowercase = scheduler.timesteps[5]
            _lowercase = scheduler.timesteps[6]
            _lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            _lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _UpperCAmelCase ( self ):
        """Round-trip the config through the related multistep schedulers and
        verify the full-loop result is unchanged (mean ~0.23916)."""
        _lowercase = DEISMultistepScheduler(**self.get_scheduler_config() )
        _lowercase = self.full_loop(scheduler=UpperCAmelCase )
        _lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_mean.item() - 0.23_916 ) < 1e-3
        _lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        _lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        _lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
        _lowercase = DEISMultistepScheduler.from_config(scheduler.config )
        _lowercase = self.full_loop(scheduler=UpperCAmelCase )
        _lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_mean.item() - 0.23_916 ) < 1e-3

    def _UpperCAmelCase ( self ):
        """Exercise a range of ``num_train_timesteps`` values."""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCAmelCase )

    def _UpperCAmelCase ( self ):
        """Exercise thresholding combinations (orders, thresholds, prediction types)."""
        # NOTE(review): the first call presumably passed ``thresholding=False``.
        self.check_over_configs(thresholding=UpperCAmelCase )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type="""deis""" , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )

    def _UpperCAmelCase ( self ):
        """Exercise the supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=UpperCAmelCase )

    def _UpperCAmelCase ( self ):
        """Exercise solver order/type combinations and check for NaNs."""
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
                        _lowercase = self.full_loop(
                            solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
                        assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"

    def _UpperCAmelCase ( self ):
        """Exercise both ``lower_order_final`` settings.

        NOTE(review): both calls pass the same mangled placeholder — upstream
        this is one call with ``True`` and one with ``False``.
        """
        self.check_over_configs(lower_order_final=UpperCAmelCase )
        self.check_over_configs(lower_order_final=UpperCAmelCase )

    def _UpperCAmelCase ( self ):
        """Exercise a range of inference-step counts."""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )

    def _UpperCAmelCase ( self ):
        """Full loop with default config; mean of |sample| should be ~0.23916."""
        _lowercase = self.full_loop()
        _lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_mean.item() - 0.23_916 ) < 1e-3

    def _UpperCAmelCase ( self ):
        """Full loop with v-prediction; mean of |sample| should be ~0.091."""
        _lowercase = self.full_loop(prediction_type="""v_prediction""" )
        _lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3

    def _UpperCAmelCase ( self ):
        """Run the loop on a half-precision sample and check dtype is preserved.

        NOTE(review): ``torch.floataa`` is a mangled dtype name — presumably
        ``torch.float16`` here (the sample is created with ``.half()``).
        """
        _lowercase = self.scheduler_classes[0]
        _lowercase = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
        _lowercase = scheduler_class(**UpperCAmelCase )
        _lowercase = 10
        _lowercase = self.dummy_model()
        _lowercase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(UpperCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            _lowercase = model(UpperCAmelCase , UpperCAmelCase )
            _lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        assert sample.dtype == torch.floataa
import json
import os
import tempfile

import datasets
from utils import generate_example_dataset, get_duration

# Number of examples used for the full-speed sequential/batched benchmarks.
SPEED_TEST_N_EXAMPLES = 50_000
# Smaller example count used for the slower formatted-read benchmarks.
SMALL_TEST = 5_000

# Results are written next to this script under ``results/<script>.json``.
# (Restored: the mangled original unpacked os.path.split(__file__) into one
# reused throwaway name and then referenced these identifiers undefined; the
# import line also carried fused table junk, a SyntaxError.)
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    """Read ``length`` single examples sequentially; ``@get_duration`` makes
    the function return the elapsed wall-clock time.

    (Restored: the mangled original declared the same parameter name twice —
    a SyntaxError — and all four benchmark functions shared one name, while
    ``benchmark_iterating`` calls them as ``read``/``read_batch``/... .)
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Read ``length`` examples in slices of ``batch_size``; returns elapsed
    time via ``@get_duration``.  (Names restored from the mangled original,
    whose duplicate parameter names were a SyntaxError.)
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, type, length):
    """Read ``length`` single examples under an output format (numpy/pandas/
    torch/tensorflow); returns elapsed time via ``@get_duration``.

    ``type`` intentionally shadows the builtin: callers pass it by keyword
    (``{"type": "numpy", ...}``).  (Restored from the mangled original.)
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    """Read ``length`` examples in slices of ``batch_size`` under an output
    format; returns elapsed time via ``@get_duration``.
    (Restored from the mangled original, whose duplicate parameter names were
    a SyntaxError.)
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Generate a synthetic dataset, run every read benchmark before and after
    shuffling, and dump the timings as JSON to ``RESULTS_FILE_PATH``.

    (Restored: the mangled original bound every intermediate — including each
    timing — to a throwaway local and referenced undefined names.)
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # After shuffling, only the cheaper subset is re-run.
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # Record each timing under "<func name> <arg values>".
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 398 | 1 |
from importlib import import_module
from .logging import get_logger
lowercase__ = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase , _lowercase=None ):
'''simple docstring'''
__a : Union[str, Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , _lowercase , getattr(_lowercase , _lowercase ) )
__a : Optional[Any] = module._original_module if isinstance(_lowercase , _PatchedModuleObj ) else module
class patch_submodule:
    """Context manager that replaces ``obj``'s view of a dotted ``target``
    (e.g. ``"os.path.join"``) with ``new``.

    Works whether the attribute was imported as a submodule (``import os``),
    imported directly (``from os.path import join``, possibly renamed), or is
    a builtin such as ``open``.  ``start()``/``stop()`` provide a non-context
    API backed by the class-level ``_active_patches`` registry.

    (Restored: the mangled original declared one parameter name four times in
    ``__init__`` — a SyntaxError — bound every instance attribute to a
    throwaway local, and shadowed the sibling ``_PatchedModuleObj`` class by
    reusing its mangled name.)
    """

    # Shared registry of patches activated via start(); used by stop().
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}          # attr name -> original value, restored in __exit__
        self.attrs = attrs or []    # extra names _PatchedModuleObj must copy over

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["""__builtins__"""][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")

    def __exit__(self, *exc_info):
        # Restore every attribute we replaced, emptying self.original.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch (until ``stop`` is called)."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
return self.__exit__()
| 702 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 63 | 0 |
lowerCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
# Expose the rich-backed utilities only when the optional `rich` dependency is installed.
if is_rich_available():
    from .utils import rich
| 496 |
def fibonacci(n: int) -> int:
    """Return the ``n``-th Fibonacci number (sequence seeded [0, 1]).

    Returns 0 for ``n == 1`` or non-int input, mirroring the original guard.
    (Restored: the mangled original defined all three functions under one
    shared name and called ``isinstance(n, n)``, which raises TypeError.)
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
    # Read the desired digit count from stdin and print the answer.
    print(solution(int(str(input()).strip())))
| 89 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn argparse's ``["--key", "value", ...]`` leftovers into a dict.

    (Restored: the mangled original's body referenced ``unknown_args`` while
    the parameter carried a different name, so every call raised NameError.)
    """
    return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """Entry point for the ``datasets-cli`` tool: build the parser, register
    the subcommands, parse args, and dispatch to the selected command.

    (Restored: the mangled original bound parser/args/service to throwaway
    locals and the ``__main__`` guard called an undefined ``main``.)
    """
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""", usage="""datasets-cli <command> [<args>]""", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
'''simple docstring'''
class TrieNode:
    """Node of a prefix tree (trie) over single characters.

    (Restored: the mangled original's ``__init__`` bound its state to locals
    instead of attributes, all four methods shared one name, and the code
    below references ``TrieNode``/``insert_many``/``insert``/``find``/
    ``delete``, pinning these names.)
    """

    def __init__(self) -> None:
        self.nodes = {}       # char -> child TrieNode
        self.is_leaf = False  # True when a stored word ends at this node

    def insert_many(self, words: list) -> None:
        """Insert every word in ``words``."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating child nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff ``word`` was previously inserted (exact match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove ``word`` from the trie, pruning now-empty branches."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when the caller may delete its edge to ``curr``.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: "TrieNode", word: str) -> None:
    """Depth-first print of every complete word stored under ``node``,
    prefixed by ``word`` (separated by spaces, no trailing newline).

    (Restored: the mangled original recursed into an undefined name and
    printed a mangled local; the annotation is quoted so the function is
    importable independently of where ``TrieNode`` is defined.)
    """
    if node.is_leaf:
        print(word, end=""" """ )
    for key, value in node.nodes.items():
        # Recurse into each child, extending the accumulated prefix.
        print_words(value, word + key)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small word set; return True on success.

    (Restored: the mangled original bound ``words``/``root`` to throwaway
    locals and then referenced them undefined.)
    """
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("""banana""" )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    assert root.find("""apple""" )
    assert root.find("""all""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print whether the named test passed.

    (Restored: the mangled original's body referenced ``passes`` while its
    parameter carried a different name; ``main`` below calls
    ``print_results``.)
    """
    print(str(msg), """works!""" if passes else """doesn't work :(""" )
def pytests() -> None:
    """Assertion-style entry point (for pytest-style collection).

    (Restored name: the mangled original shared one name with four other
    functions in this file; ``pytests`` matches the upstream source.)
    """
    assert test_trie()
def main() -> None:
    """Run the trie self-test and report the result.

    (Restored: the ``__main__`` guard calls ``main``, which the mangled
    shared function name left undefined; the guard line also carried fused
    table junk.)
    """
    print_results("""Testing trie functionality""", test_trie())


if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the GLPN model (transformers pattern).
# (Restored: the mangled original reassigned one name ``a_`` for the dict and
# every backend list — clobbering the structure — and then passed an
# undefined ``_import_structure`` to ``_LazyModule``.)
_import_structure = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_glpn"""] = ["""GLPNFeatureExtractor"""]
    _import_structure["""image_processing_glpn"""] = ["""GLPNImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_glpn"""] = [
        """GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GLPNForDepthEstimation""",
        """GLPNLayer""",
        """GLPNModel""",
        """GLPNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # ... while at runtime the module is replaced with a lazy proxy so heavy
    # submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 676 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A :
    """Builds tiny MBart configs and inputs for the TF model tests.

    NOTE(review): identifiers are machine-mangled.  The three ``__snake_case``
    class attributes shadow one another (upstream: ``config_cls``,
    ``config_updates``, ``hidden_act``), three classes in this fragment all
    bind the name ``A`` (only the last survives at module level), and
    ``__init__`` declares one parameter name sixteen times — a SyntaxError as
    written.  Every ``lowerCAmelCase_ = ...`` binds a throwaway local while
    later lines reference the intended names (``parent``, ``input_ids`` ...).
    """
    __snake_case = MBartConfig
    __snake_case = {}
    __snake_case = 'gelu'

    def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=99, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=20, UpperCamelCase__=2, UpperCamelCase__=1, UpperCamelCase__=0, ):
        """Record the (tiny) model hyper-parameters used by the tests."""
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = seq_length
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_labels
        lowerCAmelCase_ = vocab_size
        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = max_position_embeddings
        lowerCAmelCase_ = eos_token_id
        lowerCAmelCase_ = pad_token_id
        lowerCAmelCase_ = bos_token_id

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build a tiny config plus a matching inputs dict (ids end with EOS)."""
        lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCAmelCase_ = tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCAmelCase_ = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCAmelCase_ = prepare_mbart_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
        """Run the decoder with use_cache and grab the past key/values.

        NOTE(review): truncated relative to upstream — the cached/non-cached
        output comparison that normally follows is missing here.
        """
        lowerCAmelCase_ = TFMBartModel(config=UpperCamelCase__ ).get_decoder()
        lowerCAmelCase_ = inputs_dict['''input_ids''']
        lowerCAmelCase_ = input_ids[:1, :]
        lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
        lowerCAmelCase_ = inputs_dict['''head_mask''']
        lowerCAmelCase_ = 1
        # first forward pass
        lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
        lowerCAmelCase_ = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a TFMBart forward pass, deriving any
    mask not supplied (padding-based attention masks, all-ones head masks).

    (Restored: the mangled original declared ``_A`` repeatedly — a
    SyntaxError — while the body and the caller at ``prepare_config_and_inputs``
    use these names; ``tf.inta`` is the mangled ``tf.int8``.)
    """
    if attention_mask is None:
        # Attend everywhere except padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended (decoder start token).
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """TF MBart model test-suite (model + pipeline mixins).

    NOTE(review): machine-mangled — the base classes ``__UpperCAmelCase``
    are presumably ``TFModelTesterMixin`` and ``PipelineTesterMixin``, the
    ``__snake_case`` attributes shadow one another (upstream:
    ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping``, ``is_encoder_decoder``, ``test_pruning``,
    ``test_onnx``), and the first method declares one parameter name five
    times — a SyntaxError as written.
    """
    __snake_case = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    __snake_case = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    __snake_case = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __snake_case = True
    __snake_case = False
    __snake_case = False

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Skip every pipeline test except feature extraction."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Set up the model tester and config tester helpers."""
        lowerCAmelCase_ = TFMBartModelTester(self )
        lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Exercise the decoder past-key-values path with large inputs."""
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
    """Slow integration test: en->ro translation with facebook/mbart-large-en-ro.

    NOTE(review): machine-mangled — the three ``__snake_case`` attributes
    shadow one another (upstream: ``src_text``, ``expected_text``,
    ``model_name``), every method shares one name so only the last survives,
    and the ``lowerCAmelCase_`` assignments bind throwaway locals whose
    intended names (``model``, ``generated_words`` ...) later lines reference.
    """
    __snake_case = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    __snake_case = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    __snake_case = 'facebook/mbart-large-en-ro'

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Lazily load and cache the tokenizer for the checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Lazily load and cache the TF seq2seq model for the checkpoint."""
        lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
        """Translate the source batch and compare with the expected text."""
        lowerCAmelCase_ = self.translate_src_text(**UpperCamelCase__ )
        self.assertListEqual(self.expected_text, UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
        """Tokenize the source text, generate with beam search, and decode."""
        lowerCAmelCase_ = self.tokenizer(self.src_text, **UpperCamelCase__, return_tensors='''tf''' )
        lowerCAmelCase_ = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
        lowerCAmelCase_ = self.tokenizer.batch_decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__ )
        return generated_words

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """End-to-end batch translation check (slow test)."""
        self._assert_generated_batch_equal_expected()
| 431 | 0 |
def merge_sort(collection: list) -> list:
    """Sort ``collection`` ascending by repeatedly extracting its minimum and
    maximum ("min-max" selection); the input list is consumed (mutated).

    (Restored: the mangled original bound the min/max pair and the two
    accumulator lists to throwaway names, then referenced undefined
    ``start``/``end``; the ``__main__`` block calls this as ``merge_sort``.)
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        # remove() drops the first occurrence, so duplicates are preserved.
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    # Read a comma-separated list of ints from stdin and print it sorted.
    # (Restored: the mangled original reassigned one throwaway name twice and
    # then referenced undefined ``user_input``/``unsorted``.)
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(*merge_sort(unsorted), sep=""",""")
| 33 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
    """Pipeline tests for video classification (VideoMAE-based).

    NOTE(review): machine-mangled — all three instance methods share the name
    ``__snake_case`` (only the last survives), and several
    ``lowerCAmelCase = ...`` statements bind throwaway locals whose intended
    names (``example_video_filepath``, ``video_classifier``, ``examples``,
    ``outputs`` ...) later lines reference undefined.
    """
    __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
        """Build a top-2 video-classification pipeline plus example inputs
        (one local file, one URL)."""
        lowerCAmelCase = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
        lowerCAmelCase = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
        """Run the pipeline on each example and check the output structure
        (two score/label dicts)."""
        for example in examples:
            lowerCAmelCase = video_classifier(UpperCAmelCase_ )
            self.assertEqual(
                UpperCAmelCase_ , [
                    {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
                    {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
                ] , )

    @require_torch
    def __snake_case ( self ):
        """Exact-score check against a tiny random VideoMAE model (PyTorch)."""
        lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        lowerCAmelCase = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        lowerCAmelCase = pipeline(
            '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
        lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
        lowerCAmelCase = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ] , )

    @require_tf
    def __snake_case ( self ):
        """No TF implementation of this pipeline test yet."""
        pass
| 33 | 1 |
"""simple docstring"""
import math
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = 0
while num > 0:
_UpperCamelCase = num % 8
_UpperCamelCase = octal + (remainder * math.floor(math.pow(10, __snake_case ) ))
counter += 1
_UpperCamelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F'''0o{int(__snake_case )}'''
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 19 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.dummy_uncond_unet
UpperCAmelCase__ = KarrasVeScheduler()
UpperCAmelCase__ = KarrasVePipeline(unet=__a , scheduler=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(num_inference_steps=2 , generator=__a , output_type='numpy' ).images
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(num_inference_steps=2 , generator=__a , output_type='numpy' , return_dict=__a )[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'google/ncsnpp-celebahq-256'
UpperCAmelCase__ = UNetaDModel.from_pretrained(__a )
UpperCAmelCase__ = KarrasVeScheduler()
UpperCAmelCase__ = KarrasVePipeline(unet=__a , scheduler=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(num_inference_steps=20 , generator=__a , output_type='numpy' ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase__ = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 146 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( __magic_name__ ):
lowercase = 'layoutlmv3'
def __init__( self : List[str] , a : Optional[Any]=50_265 , a : Dict=768 , a : Optional[Any]=12 , a : Optional[Any]=12 , a : Optional[int]=3_072 , a : Union[str, Any]="gelu" , a : List[str]=0.1 , a : Optional[int]=0.1 , a : Tuple=512 , a : Tuple=2 , a : int=0.0_2 , a : Dict=1E-5 , a : Optional[Any]=1 , a : Optional[int]=0 , a : Optional[Any]=2 , a : Optional[Any]=1_024 , a : Optional[int]=128 , a : Optional[int]=128 , a : Tuple=True , a : Union[str, Any]=32 , a : List[str]=128 , a : Optional[int]=64 , a : Optional[Any]=256 , a : Dict=True , a : Union[str, Any]=True , a : List[Any]=True , a : List[Any]=224 , a : Any=3 , a : int=16 , a : Optional[Any]=None , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=a , hidden_size=a , num_hidden_layers=a , num_attention_heads=a , intermediate_size=a , hidden_act=a , hidden_dropout_prob=a , attention_probs_dropout_prob=a , max_position_embeddings=a , type_vocab_size=a , initializer_range=a , layer_norm_eps=a , pad_token_id=a , bos_token_id=a , eos_token_id=a , **a , )
lowerCAmelCase__ : Any = max_ad_position_embeddings
lowerCAmelCase__ : List[Any] = coordinate_size
lowerCAmelCase__ : Tuple = shape_size
lowerCAmelCase__ : Optional[Any] = has_relative_attention_bias
lowerCAmelCase__ : Tuple = rel_pos_bins
lowerCAmelCase__ : Optional[Any] = max_rel_pos
lowerCAmelCase__ : str = has_spatial_attention_bias
lowerCAmelCase__ : Union[str, Any] = rel_ad_pos_bins
lowerCAmelCase__ : str = max_rel_ad_pos
lowerCAmelCase__ : str = text_embed
lowerCAmelCase__ : Optional[int] = visual_embed
lowerCAmelCase__ : List[str] = input_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Dict = patch_size
lowerCAmelCase__ : Optional[Any] = classifier_dropout
class A__ ( __magic_name__ ):
lowercase = version.parse('1.12' )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 1E-5
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return 12
def _lowerCamelCase ( self : Optional[int] , a : "ProcessorMixin" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional["TensorType"] = None , a : int = 3 , a : int = 40 , a : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , 'apply_ocr' , a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : Dict = processor.tokenizer.num_special_tokens_to_add(a )
lowerCAmelCase__ : List[Any] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : int = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowerCAmelCase__ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowerCAmelCase__ : Optional[int] = self._generate_dummy_images(a , a , a , a )
lowerCAmelCase__ : List[Any] = dict(
processor(
a , text=a , boxes=a , return_tensors=a , ) )
return inputs | 69 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=" " ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE_ ):
titles.append(title if title is not None else '' )
texts.append(SCREAMING_SNAKE_CASE_ )
return {"title": titles, "text": texts}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ : List[str] = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )['input_ids']
lowerCAmelCase__ : Tuple = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase__ : str = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase__ : Optional[Any] = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase__ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase__ : List[Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase__ : List[Any] = dataset.map(
partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) , batched=SCREAMING_SNAKE_CASE_ , batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE_ , )
# And finally save your dataset
lowerCAmelCase__ : Optional[Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase__ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE_ )
# And save the index
lowerCAmelCase__ : str = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A__ :
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowercase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class A__ :
lowercase = field(
default=__magic_name__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowercase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class A__ :
lowercase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowercase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 69 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase =get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =DebertaVaTokenizer
lowerCamelCase : Dict =DebertaVaTokenizerFast
lowerCamelCase : Optional[Any] =True
lowerCamelCase : Optional[int] =True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = DebertaVaTokenizer(a , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : List[str] ):
"""simple docstring"""
__lowerCamelCase = '''this is a test'''
__lowerCamelCase = '''this is a test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = '''<pad>'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(a ) , 3_00_01 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ''' \tHeLLo!how \n Are yoU? '''
__lowerCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
__lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
__lowerCamelCase = DebertaVaTokenizer(a , split_by_punct=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = DebertaVaTokenizerFast(a , split_by_punct=a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
__lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
__lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
__lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = ''' \tHeLLo!how \n Are yoU? '''
__lowerCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
__lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
__lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(a )
__lowerCamelCase = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = '''This is a test'''
__lowerCamelCase = [13, 1, 43_98, 25, 21, 12_89]
__lowerCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
__lowerCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
__lowerCamelCase = DebertaVaTokenizer(a , keep_accents=a )
__lowerCamelCase = DebertaVaTokenizerFast(a , keep_accents=a )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
# fmt: off
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
__lowerCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
__lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = DebertaVaTokenizer(a )
__lowerCamelCase = tokenizer.encode('''sequence builders''' )
__lowerCamelCase = tokenizer.encode('''multi-sequence build''' )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 546 | '''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__UpperCAmelCase =object()
# For specifying empty leaf dict `{}`
__UpperCAmelCase =object()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
__lowerCamelCase = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
__lowerCamelCase = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[int]:
def replace(UpperCamelCase__ , UpperCamelCase__ ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def __lowerCAmelCase ( ) -> Dict:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __lowerCAmelCase ( UpperCamelCase__ ) -> int:
__lowerCamelCase = _get_partition_rules()
__lowerCamelCase = _replacement_rules(UpperCamelCase__ )
__lowerCamelCase = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
__lowerCamelCase = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 546 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

# Model-type -> Flax model class-name tables consumed by the lazy mappings below.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)

# Lazy config-class -> model-class mappings built from the name tables above.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Each FlaxAutoModel* class binds a task-specific mapping; auto_class_update
# rewrites its docstring/from_pretrained for that task.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# Resource-file names and download URLs used by the tokenizer class below.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

# Maximum input sizes (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall, backed by `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS/EOS around one sequence, or join a pair with EOS separators."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type mask (BlenderbotSmall does not use segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
# NOTE(review): `uuida` comes from this file's `from uuid import uuida` import,
# which looks like a mangled `uuid4` -- confirm and fix that import line.
SESSION_ID = uuida().hex
# Telemetry opt-outs read from the environment once at import time.
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with diffusers/python/framework versions.

    ``user_agent`` may be a dict of extra key/value tags or a raw string to append.
    Telemetry suffixes are suppressed when the user opted out via environment.
    """
    ua = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += f'''; jax/{_jax_version}'''
        ua += f'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += f'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return the fully-qualified Hub repo name ``namespace/model_id``.

    Uses the given ``organization`` when provided, otherwise the namespace of the
    user identified by ``token`` (falling back to the locally stored token).
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'''{username}/{model_id}'''
    else:
        return f'''{organization}/{model_id}'''
def create_model_card(args, model_name):
    """Render and save a README.md model card for a training run.

    ``args`` is a training-args namespace; optional hyperparameters are copied
    over only when present on it. Requires Jinja2 for template rendering.
    """
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.'
        )

    # Only the main process writes the card in distributed runs.
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en',
            license='apache-2.0',
            library_name='diffusers',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the Hub commit hash from a resolved snapshot path.

    Returns ``commit_hash`` unchanged when already known or when there is no
    file to inspect; returns None when the path does not contain a valid hash.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    """Move blob files from the old diffusers cache layout to the new one.

    Each moved blob is replaced by a symlink at its old location so older
    diffusers versions can keep using the old cache directory.
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.'
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache migration: bump version_diffusers_cache.txt to 1 after moving
# any pre-v0.14.0 cached files into the new layout.
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, 'w') as f:
            f.write('1')
    except Exception:
        logger.warning(
            f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
            'the directory exists and can be written to.'
        )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) ->Optional[int]:
if variant is not None:
_lowerCamelCase : int = weights_name.split('''.''' )
_lowerCamelCase : Union[str, Any] = splits[:-1] + [variant] + splits[-1:]
_lowerCamelCase : List[Any] = ".".join(SCREAMING_SNAKE_CASE_ )
return weights_name
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , *,
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , ) ->Dict:
_lowerCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE_ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
# Load from a PyTorch checkpoint
_lowerCamelCase : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
_lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
_lowerCamelCase : str = hf_hub_download(
SCREAMING_SNAKE_CASE_ , filename=_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE_ , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE_ , )
try:
# 2. Load model file as usual
_lowerCamelCase : List[Any] = hf_hub_download(
SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
class FlowNetwork:
    """Flow network over an adjacency-matrix capacity graph.

    Multiple sources/sinks are normalized into a single super-source/super-sink
    pair by adding fake vertices.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        # len(graph) is taken after normalization, so fake vertices are counted.
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # camelCase read-only aliases used by FlowNetworkAlgorithmExecutor.
    @property
    def verticesCount(self):
        return self.verticies_count

    @property
    def sourceIndex(self):
        return self.source_index

    @property
    def sinkIndex(self):
        return self.sink_index

    def _normalize_graph(self, sources, sinks):
        """Reduce the network to a single source and a single sink."""
        # was `sources is int` (always False for int values); fixed to isinstance
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            # prepend a super-source row/column feeding every real source
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            # append a super-sink row/column drained by every real sink
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        # `algorithm` is an executor class; instantiate it bound to this network.
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    """Base class for flow algorithms: caches network metadata and runs once."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.sourceIndex
        self.sink_index = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        """Run the algorithm once; subsequent calls are no-ops."""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        # Subclasses override this with the actual flow computation.
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor base for maximum-flow algorithms; stores the computed flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        """Return the computed maximum flow; requires execute() to have run."""
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push-relabel maximum-flow with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        # Source starts at maximum height so pushes flow downhill.
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge a vertex: push along admissible edges, then relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push min(excess, residual capacity) along the edge."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Raise the vertex just above its lowest residual neighbour."""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    # Fix: every value below was assigned to the same overwritten variable while
    # the calls referenced the intended names — restore those names.
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    # NOTE(review): FlowNetwork and PushRelabelExecutor are expected to be
    # defined earlier in this file — confirm their (obfuscated) names.
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
| 216 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class snake_case ( Protocol ):
    """Structural type for audio filters: any object with a ``process`` method.

    Fixes: the base class was an undefined name (restored to ``Protocol``,
    imported at the top of this file), and the method is named ``process`` to
    match the ``filter_type.process(...)`` call sites below.
    """

    def process( self, sample: float ) -> float:
        """Process one sample; the placeholder implementation is silent."""
        return 0.0
def lowerCAmelCase ( fft_results, samplerate ):
    """Return ``(lowest, highest)`` dB bounds over the positive-frequency bins,
    clamped to at most -20 dB and at least +20 dB.

    Fix: the two parameters shared one duplicate name (a SyntaxError).
    """
    band = fft_results[1 : samplerate // 2 - 1]
    lowest = min([-20, np.min(band )] )
    highest = max([20, np.max(band )] )
    return lowest, highest
def lowerCAmelCase ( filter_type, samplerate ):
    """Plot the gain (dB) frequency response of ``filter_type`` on a log axis.

    Fixes: duplicate parameter names; impulse samples (not the filter object)
    are fed to ``process``; the FFT is taken of the filter *outputs*; dB
    conversion uses ``np.log10`` (``np.logaa`` does not exist).
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def lowerCAmelCase ( filter_type, samplerate ):
    """Plot the phase response (radians) of ``filter_type`` on a log axis.

    Fixes: duplicate parameter names; impulse samples (not the filter object)
    are fed to ``process``; the FFT is taken of the filter *outputs*.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 215 |
"""simple docstring"""
# Adjacency list of the DAG and its vertex list; the sort routine below reads
# these module-level names (they were previously bound to one overwritten name).
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def lowerCAmelCase ( start, visited, sort ):
    """Recursive DFS topological sort over the module-level ``edges``/``vertices``.

    Appends discovered vertices to ``visited`` and post-order vertices to
    ``sort``; returns ``sort``.

    Fixes: the three parameters shared one duplicate name (a SyntaxError) and
    the recursive calls referenced an undefined ``topological_sort``.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = lowerCAmelCase(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = lowerCAmelCase(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    # Fix: the result was bound to one name while an undefined one was printed.
    sort = lowerCAmelCase('a', [], [])
    print(sort)
| 215 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Fix: the import-structure dict was overwritten by a bare list and the final
# _LazyModule call referenced an undefined name; restored the standard
# transformers lazy-module pattern.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy torch imports happen on
    # first attribute access only.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 175 |
from __future__ import annotations
def a__ ( nth_term: int | float | str, power: int | float | str ) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p ...

    An empty ``nth_term`` yields ``[""]``; both arguments are coerced to int.

    Fix: the two parameters shared one duplicate name (a SyntaxError).
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        # The first term is just "1"; later terms are 1 / (n ** power).
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: both inputs were written to the same overwritten variable and an
    # undefined `p_series` was called. Inputs stay raw strings so the ""
    # sentinel is handled inside a__ (which coerces to int itself).
    nth_term = input("Enter the last number (nth term) of the P-Series")
    power = input("Enter the power for P-Series")
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(a__(nth_term, power))
| 175 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def lowerCamelCase():
    """10-row in-memory Dataset with sequence / class-label / nested features.

    Fix: internal names (n / features / dataset) were collapsed onto one
    overwritten variable while the code referenced the originals.
    """
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def lowerCamelCase(dataset, tmp_path_factory):
    """Cache the shared dataset to an .arrow file and return its path.

    NOTE(review): parameter names were obfuscated; restored to the fixtures
    this body evidently consumes — confirm against the original conftest.
    """
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
# Fix: name the constant so the fixtures below can reference it (it was bound
# to an overwritten placeholder name).
FILE_CONTENT = '\\n Text data.\n Second line of data.'


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Plain-text file containing FILE_CONTENT; returns its path."""
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """bzip2-compressed copy of FILE_CONTENT; returns its path."""
    import bz2  # fix: was a nonexistent `bza` module

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """gzip-compressed copy of FILE_CONTENT; returns its path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")  # fix: was encoding the fixture argument
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """lz4-compressed copy of FILE_CONTENT (only when lz4 is installed)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame  # fix: was a nonexistent `lza` module

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, text_file):
    """7z archive containing the text file (only when py7zr is installed)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr  # fix: was a nonexistent `pyazr` module

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, text_file):
    """Uncompressed tar archive containing the text file."""
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """xz (lzma) compressed copy of FILE_CONTENT; returns its path."""
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, text_file):
    """Zip archive containing the text file."""
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """zstd-compressed copy of FILE_CONTENT (only when zstandard is installed)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Small TMX (translation-memory XML) file; returns its path."""
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
# Row/column test payloads shared by the path fixtures below.
# Fix: give each constant the name the fixtures actually reference (they were
# all assigned to one repeatedly-overwritten variable).
DATA = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
# NOTE(review): not referenced by restored name in this chunk — confirm.
DATA2 = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
    # Expose the raw dict-of-lists payload as a session fixture.
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Dataset cached to an Arrow file; returns the cache-file path.

    NOTE(review): the from_dict argument was obfuscated; restored to
    DATA_DICT_OF_LISTS by analogy with neighbouring fixtures — confirm.
    """
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """SQLite database with one `dataset` table filled from DATA."""
    import sqlite3  # fix: was a nonexistent `sqlitea` module

    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """CSV file (dataset.csv) with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Second CSV file (dataset2.csv) with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, csv_path):
    """bzip2-compressed copy of the CSV dataset."""
    import bz2  # fix: was a nonexistent `bza` module

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
# NOTE(review): parameters restored to the csv fixtures these bodies evidently
# consume — confirm names against the original conftest.
@pytest.fixture(scope="session")
def lowerCamelCase(csv_path, csv2_path, tmp_path_factory):
    """Zip with both CSVs at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(csv_path, csv2_path, tmp_path_factory):
    """Zip whose members use an uppercase .CSV extension."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(csv_path, csv2_path, tmp_path_factory):
    """Zip with both CSVs under "main_dir/"."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Parquet file with the DATA rows and an explicit schema."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),    # fix: pa.intaa does not exist
            "col_3": pa.float64(),  # fix: pa.floataa does not exist
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """JSON file holding {"data": DATA} (list of row dicts)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """JSON file holding {"data": DATA_DICT_OF_LISTS}."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """JSON-lines file (dataset.jsonl) with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Second JSON-lines file (dataset2.jsonl) with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """JSON-lines file with the DATA_312 rows (columns in 3-1-2 order)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """JSON-lines file with the string-keyed DATA_STR rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
# NOTE(review): parameters restored to the source-path fixtures these bodies
# evidently read from — confirm names against the original conftest.
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, text_path):
    """gzip copy of the plain-text dataset file."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, jsonl_path):
    """gzip copy of the JSON-lines dataset file."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
# NOTE(review): parameters restored to the jsonl fixtures these archives
# evidently bundle — confirm names against the original conftest.
@pytest.fixture(scope="session")
def lowerCamelCase(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip with both jsonl files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip nesting the jsonl zip under "nested/"."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip with both jsonl files under "main_dir/"."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar with both jsonl files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar nesting the jsonl tar under "nested/"."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Plain-text file (dataset.txt) with one digit per line."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Second plain-text file (dataset2.txt) with one digit per line."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Text file with an unrecognized .abc extension (one digit per line)."""
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
# NOTE(review): parameters restored to the text fixtures these archives
# evidently bundle — confirm names against the original conftest.
@pytest.fixture(scope="session")
def lowerCamelCase(text_path, text2_path, tmp_path_factory):
    """Zip with both text files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(text_path, text2_path, tmp_path_factory):
    """Zip with both text files under "main_dir/"."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def lowerCamelCase(text_path, text2_path, tmp_path_factory):
    """Zip whose members carry an unsupported .ext extension."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """UTF-8 text file containing a U+2029 (paragraph separator) line break."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
    # Path of a small RGB JPEG checked into the repository's test-data dir.
    return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""")
@pytest.fixture(scope="""session""")
def lowerCamelCase ( ):
    # Path of a 44.1 kHz WAV sample in the repository's test-data dir.
    return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""")
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory, image_file):
    """Zip containing the sample image twice under two member names."""
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def lowerCamelCase(tmp_path_factory):
    """Directory tree with visible and hidden train/test text files."""
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 27 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    r"""
    Processor wrapping a ViLT image processor and a BERT tokenizer into one
    callable.

    Fixes: the obfuscated source had duplicate parameter names (a SyntaxError)
    while the bodies referenced the real names; class attributes are restored
    to the ``attributes``/``*_class`` names the ProcessorMixin base requires.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Back-compat: fall back to the deprecated feature_extractor kwarg.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize ``text`` and preprocess ``images``, merging both encodings."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of both sub-processors' input names, order kept.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 27 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase_ (__A ):
    """Config-tester mixin: Levit configs must expose convnet-style fields."""

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
        # Fix: the constructed config was bound to one name while an undefined
        # name was inspected by hasattr.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "num_attention_heads" ) )
class UpperCamelCase_ :
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=64 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Any=[128, 256, 384] , lowerCAmelCase_ : int=[4, 6, 8] , lowerCAmelCase_ : Optional[int]=[2, 3, 4] , lowerCAmelCase_ : List[Any]=[16, 16, 16] , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : int=[2, 2, 2] , lowerCAmelCase_ : List[Any]=[2, 2, 2] , lowerCAmelCase_ : str=0.0_2 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=2 , ) -> Any:
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[Any] = kernel_size
UpperCAmelCase_ : List[Any] = stride
UpperCAmelCase_ : Optional[int] = padding
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : Union[str, Any] = key_dim
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : str = attention_ratio
UpperCAmelCase_ : str = mlp_ratio
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : Any = initializer_range
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
        # Runs LevitModel forward and checks the final hidden-state shape.
        # NOTE(review): locals are mangled (``model``/``result``/``image_size``/
        # ``height``/``width`` read but never bound) and the parameters share a
        # single duplicated name -- a SyntaxError as written.
        UpperCAmelCase_ : Any = LevitModel(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        UpperCAmelCase_ : Any = model(lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = (self.image_size, self.image_size)
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = image_size[0], image_size[1]
        # four conv stages each shrink the spatial dims by the stride formula
        for _ in range(4 ):
            UpperCAmelCase_ : List[str] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            UpperCAmelCase_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Optional[Any]:
        # Runs the classification head and checks the logits shape.
        # NOTE(review): mangled locals -- ``model``/``result`` are read below
        # but only ``UpperCAmelCase_`` is bound.
        UpperCAmelCase_ : Optional[int] = self.num_labels
        UpperCAmelCase_ : Optional[Any] = LevitForImageClassification(lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        UpperCAmelCase_ : Dict = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        # Adapter used by the common test mixin: (config, {"pixel_values": ...}).
        # NOTE(review): mangled -- ``config_and_inputs``/``pixel_values`` are
        # read but never bound under those names.
        UpperCAmelCase_ : str = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
        UpperCAmelCase_ : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
    """Model-level test suite for LeViT (base model + classification heads).

    NOTE(review): this class is machine-mangled.  Every class attribute is
    assigned to ``__magic_name__`` (each binding clobbers the previous one),
    and method locals are rebound to ``UpperCAmelCase_`` while later lines
    read names (``model``, ``config``, ``inputs`` ...) that are never bound.
    Intent reconstructed from naming only -- confirm against the upstream
    LeViT test file before relying on any claim below.
    """

    # presumably all_model_classes
    __magic_name__ = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    # presumably pipeline_model_mapping
    __magic_name__ = (
        {
            '''feature-extraction''': LevitModel,
            '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    # presumably test flags (resize embeddings / head masking / pruning ...)
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        # setUp: build the model tester and config tester.
        UpperCAmelCase_ : str = LevitModelTester(self )
        UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
        # Runs the standard ConfigTester battery.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
        # Intentionally empty placeholder (common-properties check is a no-op).
        return
    @unittest.skip(reason="Levit does not use inputs_embeds" )
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        pass
    @unittest.skip(reason="Levit does not support input and output embeddings" )
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
        pass
    @unittest.skip(reason="Levit does not output attentions" )
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
        pass
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
        # Checks each model's forward signature starts with ``pixel_values``.
        UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ )
            UpperCAmelCase_ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : int = [*signature.parameters.keys()]
            UpperCAmelCase_ : Any = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
        # Verifies the number and shape of the hidden states per stage.
        def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ):
            UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
            model.to(lowerCAmelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
            UpperCAmelCase_ : str = outputs.hidden_states
            # one hidden state per stage plus the embedding output
            UpperCAmelCase_ : List[str] = len(self.model_tester.depths ) + 1
            self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
            UpperCAmelCase_ : Dict = (self.model_tester.image_size, self.model_tester.image_size)
            UpperCAmelCase_ , UpperCAmelCase_ : List[str] = image_size[0], image_size[1]
            # replay the four conv-stem down-samplings to get the token count
            for _ in range(4 ):
                UpperCAmelCase_ : Dict = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                UpperCAmelCase_ : List[Any] = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : List[str] = True
            check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : str = True
            check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
        pass
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=False ) -> Optional[Any]:
        # The teacher variant is inference-only, so labels must be stripped.
        UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase_ )
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
        UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        # Training smoke test: forward + backward on every trainable class.
        if not self.model_tester.is_training:
            return
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Optional[Any] = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowerCAmelCase_ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            UpperCAmelCase_ : int = model_class(lowerCAmelCase_ )
            model.to(lowerCAmelCase_ )
            model.train()
            UpperCAmelCase_ : Dict = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
            UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss
            loss.backward()
    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        # Same as above but with gradient checkpointing enabled.
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        UpperCAmelCase_ : Tuple = False
        UpperCAmelCase_ : Tuple = True
        for model_class in self.all_model_classes:
            if model_class in get_values(lowerCAmelCase_ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ )
            model.gradient_checkpointing_enable()
            model.to(lowerCAmelCase_ )
            model.train()
            UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
            UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss
            loss.backward()
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
        # Exercises each classification problem_type without target-size warnings.
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Tuple = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowerCAmelCase_ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
                    UpperCAmelCase_ : int = problem_type["title"]
                    UpperCAmelCase_ : int = problem_type["num_labels"]
                    UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
                    model.to(lowerCAmelCase_ )
                    model.train()
                    UpperCAmelCase_ : int = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
                    if problem_type["num_labels"] > 1:
                        UpperCAmelCase_ : Optional[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
                    UpperCAmelCase_ : str = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowerCAmelCase_ ) as warning_list:
                        UpperCAmelCase_ : Dict = model(**lowerCAmelCase_ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
        # from_pretrained smoke test on the first published checkpoint.
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Optional[int] = LevitModel.from_pretrained(lowerCAmelCase_ )
            self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ):
    """Load the standard COCO test fixture image used by the integration tests.

    Fixes the mangled original, which bound the opened image to a throwaway
    local (``UpperCAmelCase_``) and then returned the undefined name
    ``image`` (NameError at runtime).

    Returns:
        The PIL image loaded from the test-fixtures directory.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
    """Slow integration test: run a pretrained LeViT classifier on a fixture image.

    NOTE(review): locals are machine-mangled -- ``model``/``image``/``inputs``/
    ``outputs``/``expected_shape``/``expected_slice`` are read but only
    ``UpperCAmelCase_`` is ever bound.  Intent reconstructed from naming only.
    """

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        # default_image_processor for the first published checkpoint
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
        UpperCAmelCase_ : Optional[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            lowerCAmelCase_ )
        UpperCAmelCase_ : Any = self.default_image_processor
        UpperCAmelCase_ : Dict = prepare_img()
        UpperCAmelCase_ : int = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase_ )
        # verify the logits
        UpperCAmelCase_ : Dict = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(lowerCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 95 |
def snake_case ( lowerCamelCase = 2_000_000 ):
    """Return the sum of all primes strictly below ``lowerCamelCase``.

    Project Euler #10 via a sieve of Eratosthenes.  Fixes the mangled
    original, which (a) referenced the unbound name ``n`` instead of the
    parameter, (b) rebound every local to ``__lowercase`` so the sieve list
    and accumulator were never created, and (c) used the upper bound instead
    of the prime ``i`` as the inner-loop step.

    Args:
        lowerCamelCase: exclusive upper bound (default 2,000,000).

    Returns:
        int: sum of primes below the bound.
    """
    # primality_list[i] == 0  =>  i is (still considered) prime
    primality_list = [0 for i in range(lowerCamelCase + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(lowerCamelCase**0.5 ) + 1 ):
        if primality_list[i] == 0:
            # mark every multiple of the prime i, starting at i*i
            for j in range(i * i , lowerCamelCase + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(lowerCamelCase ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    # Fix: this module's sieve function is named ``snake_case`` -- the
    # original called the undefined name ``solution`` (NameError).
    print(F'''{snake_case() = }''')
| 80 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _A ( __lowercase , __lowercase ):
    """Remap an LDM VAE ``state_dict`` into the diffusers AutoencoderKL layout.

    NOTE(review): machine-mangled.  Both parameters share the name
    ``__lowercase`` (a SyntaxError), and every local is rebound to
    ``lowerCamelCase__`` while later lines read names (``vae_state_dict``,
    ``new_checkpoint``, ``down_blocks`` ...) that are never bound.  The
    intent -- per the upstream diffusers conversion script -- is presumably
    (checkpoint, config) -> new_checkpoint; confirm before fixing.
    """
    lowerCamelCase__ = checkpoint
    lowerCamelCase__ = {}
    # top-level conv / norm / quantization tensors are copied 1:1
    lowerCamelCase__ = vae_state_dict["""encoder.conv_in.weight"""]
    lowerCamelCase__ = vae_state_dict["""encoder.conv_in.bias"""]
    lowerCamelCase__ = vae_state_dict["""encoder.conv_out.weight"""]
    lowerCamelCase__ = vae_state_dict["""encoder.conv_out.bias"""]
    lowerCamelCase__ = vae_state_dict["""encoder.norm_out.weight"""]
    lowerCamelCase__ = vae_state_dict["""encoder.norm_out.bias"""]
    lowerCamelCase__ = vae_state_dict["""decoder.conv_in.weight"""]
    lowerCamelCase__ = vae_state_dict["""decoder.conv_in.bias"""]
    lowerCamelCase__ = vae_state_dict["""decoder.conv_out.weight"""]
    lowerCamelCase__ = vae_state_dict["""decoder.conv_out.bias"""]
    lowerCamelCase__ = vae_state_dict["""decoder.norm_out.weight"""]
    lowerCamelCase__ = vae_state_dict["""decoder.norm_out.bias"""]
    lowerCamelCase__ = vae_state_dict["""quant_conv.weight"""]
    lowerCamelCase__ = vae_state_dict["""quant_conv.bias"""]
    lowerCamelCase__ = vae_state_dict["""post_quant_conv.weight"""]
    lowerCamelCase__ = vae_state_dict["""post_quant_conv.bias"""]
    # Retrieves the keys for the encoder down blocks only
    lowerCamelCase__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
    lowerCamelCase__ = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowercase )
    }
    # Retrieves the keys for the decoder up blocks only
    lowerCamelCase__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
    lowerCamelCase__ = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowercase )
    }
    # encoder: per-block resnets plus optional downsample conv
    for i in range(__lowercase ):
        lowerCamelCase__ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            lowerCamelCase__ = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            lowerCamelCase__ = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        lowerCamelCase__ = renew_vae_resnet_paths(__lowercase )
        lowerCamelCase__ = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
    # encoder mid-block: two resnets and one attention
    lowerCamelCase__ = [key for key in vae_state_dict if """encoder.mid.block""" in key]
    lowerCamelCase__ = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        lowerCamelCase__ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        lowerCamelCase__ = renew_vae_resnet_paths(__lowercase )
        lowerCamelCase__ = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
    lowerCamelCase__ = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
    lowerCamelCase__ = renew_vae_attention_paths(__lowercase )
    lowerCamelCase__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
    assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
    conv_attn_to_linear(__lowercase )
    # decoder: up blocks are stored in reverse order, hence num_up_blocks - 1 - i
    for i in range(__lowercase ):
        lowerCamelCase__ = num_up_blocks - 1 - i
        lowerCamelCase__ = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            lowerCamelCase__ = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            lowerCamelCase__ = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        lowerCamelCase__ = renew_vae_resnet_paths(__lowercase )
        lowerCamelCase__ = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
    # decoder mid-block mirrors the encoder mid-block handling
    lowerCamelCase__ = [key for key in vae_state_dict if """decoder.mid.block""" in key]
    lowerCamelCase__ = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        lowerCamelCase__ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        lowerCamelCase__ = renew_vae_resnet_paths(__lowercase )
        lowerCamelCase__ = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
    lowerCamelCase__ = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
    lowerCamelCase__ = renew_vae_attention_paths(__lowercase )
    lowerCamelCase__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
    assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
    conv_attn_to_linear(__lowercase )
    return new_checkpoint
def _A ( __lowercase , __lowercase , ):
    """Convert a Stable Diffusion ``vae.pt`` checkpoint to a diffusers AutoencoderKL.

    NOTE(review): machine-mangled (duplicate ``__lowercase`` parameters are a
    SyntaxError; locals rebound to ``lowerCamelCase__`` while ``r``,
    ``checkpoint_path``, ``io_obj``, ``vae_config``, ``vae`` ... are read
    unbound).  Presumed signature: (vae_pt_path, dump_path).
    """
    # fetch the reference v1 inference config from the CompVis repo
    lowerCamelCase__ = requests.get(
        """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
    lowerCamelCase__ = io.BytesIO(r.content )
    lowerCamelCase__ = OmegaConf.load(__lowercase )
    lowerCamelCase__ = 512
    lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
    if checkpoint_path.endswith("""safetensors""" ):
        from safetensors import safe_open
        lowerCamelCase__ = {}
        with safe_open(__lowercase , framework="""pt""" , device="""cpu""" ) as f:
            for key in f.keys():
                lowerCamelCase__ = f.get_tensor(__lowercase )
    else:
        lowerCamelCase__ = torch.load(__lowercase , map_location=__lowercase )["""state_dict"""]
    # Convert the VAE model.
    lowerCamelCase__ = create_vae_diffusers_config(__lowercase , image_size=__lowercase )
    lowerCamelCase__ = custom_convert_ldm_vae_checkpoint(__lowercase , __lowercase )
    lowerCamelCase__ = AutoencoderKL(**__lowercase )
    vae.load_state_dict(__lowercase )
    vae.save_pretrained(__lowercase )
if __name__ == "__main__":
    # NOTE(review): mangled -- the parser is bound to ``__magic_name__`` (and
    # immediately clobbered by the parsed args), yet ``parser``/``args`` are
    # read below, and the conversion function above was renamed to ``_A``, so
    # ``vae_pt_to_vae_diffuser`` is undefined; this raises NameError as written.
    __magic_name__ = argparse.ArgumentParser()
    parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    __magic_name__ = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 718 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def _A ( graph , visited_edge , u , path=None ):
    """Depth-first traversal that collects an Eulerian path starting at ``u``.

    Fixes the mangled original, whose four parameters all shared the name
    ``__lowercase`` (a SyntaxError), whose locals were rebound to a throwaway
    name, and whose recursion called the undefined name ``dfs``.

    Args:
        graph: adjacency dict, node -> list of neighbour nodes.
        visited_edge: symmetric boolean matrix marking consumed edges.
        u: current node.
        path: accumulated path so far (None on the initial call).

    Returns:
        list: the node sequence of the traversal, ending with ``u``'s subtree.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # consume the undirected edge in both directions
            path_a , path_b = True, True
            visited_edge[u][v], visited_edge[v][u] = path_a, path_b
            path = _A(graph , visited_edge , v , path )
    return path
def _A ( graph , max_node ):
    """Classify a graph for Euler traversal by counting odd-degree nodes.

    Fixes the mangled original (duplicate ``__lowercase`` parameters; counter
    and tracked node rebound to a throwaway local).

    Args:
        graph: adjacency dict, node -> list of neighbours.
        max_node: exclusive upper bound on node ids to inspect.

    Returns:
        tuple[int, int]: (status, odd_node) where status is 1 for an Euler
        cycle (0 odd nodes), 2 for an Euler path (2 odd nodes, odd_node is a
        valid start), and 3 for neither.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
        if i not in graph.keys():
            continue
        if len(graph[i] ) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def _A ( graph , max_node ):
    """Report whether ``graph`` has an Euler cycle/path and print one traversal.

    Fixes the mangled original (duplicate ``__lowercase`` parameters; every
    local rebound to a throwaway name).  NOTE(review): the helper names
    ``check_circuit_or_path`` and ``dfs`` are unchanged from the original and
    are still undefined in this file, since the mangler renamed every
    definition to ``_A`` -- restore distinct names upstream.
    """
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check, odd_node = check_circuit_or_path(graph , max_node )
    if check == 3:
        print("""graph is not Eulerian""" )
        print("""no path""" )
        return
    start_node = 1
    if check == 2:
        # an Euler path must start at one of the two odd-degree nodes
        start_node = odd_node
        print("""graph has a Euler path""" )
    if check == 1:
        print("""graph has a Euler cycle""" )
    path = dfs(graph , visited_edge , start_node )
    print(path )
def _A ( ):
    """Demo driver: run the Euler check on five sample graphs.

    Fixes the mangled original, which bound all five graphs (and the node
    bound) to a single rebound local, so only the last value survived.
    NOTE(review): ``check_euler`` is unchanged from the original and is still
    undefined here because every definition was renamed to ``_A``.
    """
    g_a = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g_a_cycle = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g_a_path = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g_triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g_empty = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g_a , max_node )
    check_euler(g_a_cycle , max_node )
    check_euler(g_a_path , max_node )
    check_euler(g_triangle , max_node )
    check_euler(g_empty , max_node )
if __name__ == "__main__":
    # NOTE(review): ``main`` is never defined under that name in this chunk --
    # the mangler renamed every function above to ``_A`` -- so this call
    # raises NameError at runtime; restore distinct definition names upstream.
    main()
| 258 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# NOTE(review): mangled -- the logger, the fairseq->HF key MAPPING, and the
# TOP_LEVEL_KEYS list are all bound to ``__a``, so each assignment clobbers
# the previous one; later code reads ``logger``/``MAPPING``/``TOP_LEVEL_KEYS``
# which are never bound under those names.
__a : List[Any] = logging.get_logger(__name__)
# fairseq state-dict prefix -> transformers Wav2Vec2 module path
__a : Optional[Any] = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """adapter_layer""": """encoder.layers.*.adapter_layer""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
    """pooling_layer.linear""": """projector""",
    """pooling_layer.projection""": """classifier""",
}
# target module names that live at the top level (no ``wav2vec2.`` prefix)
__a : List[str] = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """projector""",
    """classifier""",
]
def snake_case_ ( SCREAMING_SNAKE_CASE_ ):
    """Parse a whitespace-delimited label file into ``{line_number: first_token}``.

    Fixes the mangled original, which rebound every local to ``lowercase__``
    so the result dict was never populated (and the return annotation
    referenced the unimported ``List``, raising NameError at import time).

    Args:
        SCREAMING_SNAKE_CASE_: path of the text file to read.

    Returns:
        dict[int, str]: 0-based line number -> first whitespace-separated
        token of that line; blank lines are skipped.
    """
    result = {}
    with open(SCREAMING_SNAKE_CASE_ , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                # key is the line number, value the first token on the line
                result[line_number] = words[0]
    return result
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[str]:
    """Recursively set one weight tensor on the HF model, with shape checking.

    NOTE(review): machine-mangled -- all five parameters share one name (a
    SyntaxError) and locals are rebound to ``lowercase__`` while ``key``,
    ``hf_pointer``, ``weight_type``, ``hf_shape``, ``full_name``,
    ``hf_param_name`` ... are read unbound.  Presumed upstream signature:
    (key, value, full_name, weight_type, hf_model) -- confirm against the
    transformers wav2vec2 conversion script.
    """
    # walk the attribute path to the target submodule/parameter
    for attribute in key.split("." ):
        lowercase__ : Optional[int] = getattr(snake_case_ ,snake_case_ )
    lowercase__ : List[str] = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(snake_case_ ):
            lowercase__ : Tuple = PARAM_MAPPING[full_name.split("." )[-1]]
            lowercase__ : str = "param"
    if weight_type is not None and weight_type != "param":
        lowercase__ : int = getattr(snake_case_ ,snake_case_ ).shape
    elif weight_type is not None and weight_type == "param":
        lowercase__ : Tuple = hf_pointer
        for attribute in hf_param_name.split("." ):
            lowercase__ : Tuple = getattr(snake_case_ ,snake_case_ )
        lowercase__ : Optional[int] = shape_pointer.shape
        # let's reduce dimension
        lowercase__ : Optional[Any] = value[0]
    else:
        lowercase__ : Dict = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    # dispatch on which slot of the module holds this tensor
    if weight_type == "weight":
        lowercase__ : Tuple = value
    elif weight_type == "weight_g":
        lowercase__ : List[str] = value
    elif weight_type == "weight_v":
        lowercase__ : Any = value
    elif weight_type == "bias":
        lowercase__ : Optional[int] = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            lowercase__ : int = getattr(snake_case_ ,snake_case_ )
        lowercase__ : Optional[Any] = value
    else:
        lowercase__ : Dict = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[Any]:
    """Record one weight in a plain dict instead of setting it on a model.

    NOTE(review): machine-mangled -- duplicate parameter names (SyntaxError)
    and locals rebound to ``lowercase__`` while ``full_name``/``weight_type``/
    ``key``/``hf_param_name``/``hf_dict``/``value`` are read unbound.
    Presumed upstream signature: (key, value, full_name, weight_type, hf_dict).
    """
    lowercase__ : List[Any] = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(snake_case_ ):
            lowercase__ : List[Any] = PARAM_MAPPING[full_name.split("." )[-1]]
            lowercase__ : Dict = "param"
    if weight_type is not None and weight_type != "param":
        lowercase__ : Dict = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        lowercase__ : Optional[Any] = ".".join([key, hf_param_name] )
    else:
        lowercase__ : Optional[int] = key
    # lm_head tensors are stored whole; others drop the leading dimension
    lowercase__ : Tuple = value if "lm_head" in full_key else value[0]
# Adapter parameter renames (fairseq -> HF).  NOTE(review): mangled -- bound
# to ``__a`` (clobbering the earlier ``__a`` bindings) although the functions
# above read it as ``PARAM_MAPPING``.
__a : Optional[int] = {
    """W_a""": """linear_1.weight""",
    """W_b""": """linear_2.weight""",
    """b_a""": """linear_1.bias""",
    """b_b""": """linear_2.bias""",
    """ln_W""": """norm.weight""",
    """ln_b""": """norm.bias""",
}
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ) -> List[str]:
    """Try to map one fairseq tensor onto the HF model; return whether it matched.

    NOTE(review): machine-mangled -- duplicate parameter names (SyntaxError)
    and locals rebound to ``lowercase__`` while ``name``/``mapped_key``/
    ``weight_type``/``hf_dict`` are read unbound.  Presumed upstream
    signature: (name, value, hf_model=None, hf_dict=None).
    """
    lowercase__ : Optional[int] = False
    for key, mapped_key in MAPPING.items():
        # top-level targets keep their name; everything else nests under wav2vec2.
        lowercase__ : List[Any] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            lowercase__ : str = True
            if "*" in mapped_key:
                # substitute the encoder layer index into the wildcard
                lowercase__ : List[Any] = name.split(snake_case_ )[0].split("." )[-2]
                lowercase__ : Any = mapped_key.replace("*" ,snake_case_ )
            if "weight_g" in name:
                lowercase__ : Dict = "weight_g"
            elif "weight_v" in name:
                lowercase__ : Optional[Any] = "weight_v"
            elif "bias" in name:
                lowercase__ : Tuple = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                lowercase__ : Optional[Any] = "weight"
            else:
                lowercase__ : Union[str, Any] = None
            if hf_dict is not None:
                rename_dict(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
            else:
                set_recursively(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
            return is_used
    return is_used
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> List[str]:
    """Copy every fairseq tensor into the HF model, logging anything unmatched.

    NOTE(review): machine-mangled -- duplicate parameter names (SyntaxError)
    and locals rebound to ``lowercase__`` while ``fairseq_dict``/``is_used``/
    ``unused_weights`` are read unbound.  Presumed upstream signature:
    (fairseq_model, hf_model, is_headless).
    """
    lowercase__ : Dict = []
    lowercase__ : Union[str, Any] = fairseq_model.state_dict()
    lowercase__ : Any = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        lowercase__ : Tuple = False
        if "conv_layers" in name:
            # conv feature-extractor layers have their own positional mapping
            load_conv_layer(
                snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,hf_model.config.feat_extract_norm == "group" ,)
            lowercase__ : Optional[Any] = True
        else:
            lowercase__ : Tuple = load_wavaveca_layer(snake_case_ ,snake_case_ ,snake_case_ )
        if not is_used:
            unused_weights.append(snake_case_ )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Tuple:
    """Copy one conv feature-extractor tensor by its (layer_id, type_id) address.

    NOTE(review): machine-mangled -- duplicate parameter names (SyntaxError)
    and locals rebound to ``lowercase__`` while ``name``/``items``/``layer_id``/
    ``type_id``/``full_name``/``value``/``feature_extractor``/``use_group_norm``/
    ``unused_weights`` are read unbound.  Presumed upstream signature:
    (full_name, value, feature_extractor, unused_weights, use_group_norm).
    """
    lowercase__ : int = full_name.split("conv_layers." )[-1]
    lowercase__ : Optional[Any] = name.split("." )
    lowercase__ : int = int(items[0] )
    lowercase__ : Optional[Any] = int(items[1] )
    # type_id 0 -> the conv weight/bias itself
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            lowercase__ : Any = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            lowercase__ : Tuple = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    # type_id 2 -> the layer norm (only layer 0 when group norm is used)
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            lowercase__ : Tuple = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            lowercase__ : Union[str, Any] = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(snake_case_ )
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
    """Convert a fairseq wav2vec2 checkpoint to a transformers checkpoint.

    NOTE(review): machine-mangled -- duplicate parameter names (SyntaxError)
    and locals rebound to ``lowercase__`` while ``config_path``/``dict_path``/
    ``target_dict``/``hf_wavavec``/``model`` ... are read unbound.  Presumed
    upstream signature: (checkpoint_path, pytorch_dump_folder_path,
    config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False).
    """
    if config_path is not None:
        lowercase__ : str = WavaVecaConfig.from_pretrained(snake_case_ )
    else:
        lowercase__ : List[Any] = WavaVecaConfig()
    if is_seq_class:
        # sequence-classification head: id->label mapping read from a txt file
        lowercase__ : Optional[int] = read_txt_into_dict(snake_case_ )
        lowercase__ : Union[str, Any] = idalabel
        lowercase__ : Tuple = WavaVecaForSequenceClassification(snake_case_ )
        lowercase__ : Any = WavaVecaFeatureExtractor(
            feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=snake_case_ ,return_attention_mask=snake_case_ ,)
        feature_extractor.save_pretrained(snake_case_ )
    elif is_finetuned:
        if dict_path:
            lowercase__ : Union[str, Any] = Dictionary.load(snake_case_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowercase__ : Optional[Any] = target_dict.pad_index
            lowercase__ : Any = target_dict.bos_index
            lowercase__ : str = target_dict.eos_index
            lowercase__ : List[Any] = len(target_dict.symbols )
            lowercase__ : int = os.path.join(snake_case_ ,"vocab.json" )
            if not os.path.isdir(snake_case_ ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(snake_case_ ) )
                return
            os.makedirs(snake_case_ ,exist_ok=snake_case_ )
            lowercase__ : int = target_dict.indices
            # fairseq has the <pad> and <s> switched
            lowercase__ : Union[str, Any] = 0
            lowercase__ : int = 1
            with open(snake_case_ ,"w" ,encoding="utf-8" ) as vocab_handle:
                json.dump(snake_case_ ,snake_case_ )
            lowercase__ : int = WavaVecaCTCTokenizer(
                snake_case_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=snake_case_ ,)
            lowercase__ : Optional[Any] = True if config.feat_extract_norm == "layer" else False
            lowercase__ : List[Any] = WavaVecaFeatureExtractor(
                feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=snake_case_ ,return_attention_mask=snake_case_ ,)
            lowercase__ : int = WavaVecaProcessor(feature_extractor=snake_case_ ,tokenizer=snake_case_ )
            processor.save_pretrained(snake_case_ )
        lowercase__ : Optional[Any] = WavaVecaForCTC(snake_case_ )
    else:
        lowercase__ : Optional[Any] = WavaVecaForPreTraining(snake_case_ )
    if is_finetuned or is_seq_class:
        lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        # pre-training checkpoints need the audio_pretraining task context
        lowercase__ : Union[str, Any] = argparse.Namespace(task="audio_pretraining" )
        lowercase__ : str = fairseq.tasks.setup_task(snake_case_ )
        lowercase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=snake_case_ )
    lowercase__ : str = model[0].eval()
    recursively_load_weights(snake_case_ ,snake_case_ ,not is_finetuned )
    hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # NOTE(review): mangled -- the parser/args are bound to ``__a`` (each
    # binding clobbers the last) while ``args`` is read below, and the
    # conversion entry point above was renamed to ``snake_case_``, so
    # ``convert_wavaveca_checkpoint`` is undefined; NameError as written.
    __a : Any = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    parser.add_argument(
        '''--is_seq_class''',
        action='''store_true''',
        help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
    )
    __a : Any = parser.parse_args()
    __a : Union[str, Any] = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    ) | 397 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which `item` can be inserted into
    `sorted_collection` while keeping it sorted.

    :param sorted_collection: ascending-sorted list to search
    :param item: value to locate
    :param lo: lower bound of the search slice (inclusive)
    :param hi: upper bound of the search slice (exclusive); a negative value
        means "to the end of the collection"
    :return: insertion index in [lo, hi]
    """
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which `item` can be inserted into
    `sorted_collection` while keeping it sorted (i.e. after any equal items).

    :param sorted_collection: ascending-sorted list to search
    :param item: value to locate
    :param lo: lower bound of the search slice (inclusive)
    :param hi: upper bound of the search slice (exclusive); a negative value
        means "to the end of the collection"
    :return: insertion index in [lo, hi]
    """
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, before any equal items.

    A negative `hi` means "to the end of the collection". Delegates the index
    search to the standard-library `bisect` module (already imported above).
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, after any equal items.

    A negative `hi` means "to the end of the collection". Delegates the index
    search to the standard-library `bisect` module (already imported above).
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search.

    :param sorted_collection: ascending-sorted list to search
    :param item: value to find
    :return: an index of `item` in the list, or None when absent
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2  # overflow-safe midpoint
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        if item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search implemented via the standard-library `bisect` module.

    :return: the leftmost index of `item`, or None when absent
    """
    index = bisect.bisect_left(sorted_collection, item)
    # bisect_left returns an insertion point; verify the item is actually there.
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over `sorted_collection[left:right + 1]`.

    :return: an index of `item`, or None when absent (empty range included)
    """
    if right < left:
        return None

    midpoint = left + (right - left) // 2  # overflow-safe midpoint

    if sorted_collection[midpoint] == item:
        return midpoint
    if sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Simple interactive driver: read a sorted list and a target, report position.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 297 | 0 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power.

    (Project Euler problem 16; the default asks for 2**1000.)

    :param power: non-negative exponent applied to 2
    :return: sum of the digits of 2**power
    """
    return sum(int(digit) for digit in str(2**power))
if __name__ == "__main__":
    # Interactive driver: read the exponent, show the power and its digit sum.
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Test fixtures: a tiny SentencePiece unigram model, a tiny SentencePiece BPE
# model, and the tensor framework used for integration checks.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CamemBERT, covering both the slow (SentencePiece)
    and the fast (tokenizers-backed) implementations."""

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """The `<pad>` token maps to id 1 and back."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        # Slow tokenizer built from the BPE fixture, fast tokenizer converted from it.
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """Builds tiny ConvNext configs/inputs and runs shape assertions on behalf
    of the unittest class below (reported through `parent`)."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for ConvNext. Several common tests are skipped or overridden
    because ConvNext takes pixel values only (no input_ids/inputs_embeds,
    no attention)."""

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config properties do not apply to ConvNext; nothing to check.
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained facebook/convnext-tiny-224 checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase , UpperCAmelCase ):
_UpperCamelCase : Optional[Any] = (ConvNextBackbone,) if is_torch_available() else ()
_UpperCamelCase : Dict = ConvNextConfig
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Any:
"""simple docstring"""
lowercase__ = ConvNextModelTester(self )
| 235 |
import random
def partition(a, left_index, right_index):
    """Lomuto-style partition of a[left_index:right_index] around a[left_index].

    After the call, the pivot sits at the returned index with smaller elements
    to its left and greater-or-equal elements to its right.

    :param a: list to partition in place
    :param left_index: index of the pivot (inclusive start of the slice)
    :param right_index: exclusive end of the slice
    :return: final index of the pivot
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Move the pivot into its final position.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a, left, right):
    """Sort a[left:right] in place using quicksort with a random pivot.

    :param a: list to sort in place
    :param left: inclusive start of the slice
    :param right: exclusive end of the slice
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
def main():
    """Read comma-separated numbers from stdin, sort them, and print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
| 235 | 1 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator: make `func` return its wall-clock run time in seconds
    (its own return value is discarded)."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    # Preserve the wrapped function's name for reporting.
    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features: dict, num_examples: int = 100, seq_shapes=None):
    """Generate `num_examples` random dummy examples matching a `datasets` feature spec.

    :param features: mapping of column name -> feature type (`_ArrayXD`, `Value`
        or `Sequence`)
    :param num_examples: number of examples to produce
    :param seq_shapes: mapping of column name -> shape, required for `Sequence`
        columns
    :return: list of (index, example_dict) pairs
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence wrappers to reach the element feature.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples: int = 100, seq_shapes=None):
    """Write `num_examples` random dummy examples to an Arrow file and load it back.

    :param dataset_path: path of the Arrow file to create
    :param features: `datasets` feature spec used both to generate and encode examples
    :param num_examples: number of examples to write
    :param seq_shapes: shapes for `Sequence` columns (see `generate_examples`)
    :return: the materialized `datasets.Dataset`
    :raises ValueError: when the writer reports a different number of examples
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

    num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL for the pretrained Table Transformer models.
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    """Configuration for Table Transformer (a DETR-style detection model).

    Stores the hyper-parameters used to instantiate the model; defaults follow
    the microsoft/table-transformer-detection checkpoint.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common config attribute names onto this model's own naming scheme.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # A timm backbone and an explicit HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, dilation, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Table Transformer."""

    # Minimum torch version supporting this export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 660 | 0 |
'''simple docstring'''
from ... import PretrainedConfig
# Checkpoint name -> config URL for the pretrained NEZHA models
# (referenced by NezhaConfig below).
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    """Configuration for the NEZHA model.

    Defaults follow the sijunhe/nezha-cn-base checkpoint. NEZHA is BERT-like
    but uses functional relative position encodings, hence
    `max_relative_position`.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=2_1128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 533 |
import torch
from torch import nn
class __snake_case(nn.Module):
    """Projected adaptive log-softmax (Transfo-XL style).

    Splits the vocabulary into a frequent "head" shortlist plus tail clusters;
    tail tokens are scored through per-cluster projections, which makes the
    softmax over very large vocabularies tractable.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        """
        Args:
            n_token: vocabulary size.
            d_embed: embedding dimension of the head cluster.
            d_proj: hidden dimension of the incoming representations.
            cutoffs: ascending vocab indices delimiting the tail clusters.
            div_val: divisor applied to the embedding dim of each successive tail cluster.
            keep_order: if True, losses are scattered back in input order.
        """
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            # One extra "cluster token" per tail cluster, scored alongside the head.
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when dimensions already match.
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Compute logits, optionally projecting `hidden` through `proj` first."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        Args:
            hidden: (..., d_proj) hidden states.
            labels: optional target token ids; when given, `hidden`/`labels` are
                shifted so tokens < n predict n and NLL values are returned.

        Returns:
            Negative log-likelihoods per target when `labels` is given, otherwise
            log-probabilities over the full vocabulary.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('''Input and labels should have the same size in the batch dimension.''')
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            # No tail clusters: a single dense softmax over the whole vocab.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    # Head also scores one token per tail cluster.
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, '''keep_order''') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        """Return full-vocabulary log-probabilities for `hidden` (no targets)."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster (same assembly as forward)
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # Head cluster-token log-prob combines with the tail distribution.
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
| 100 | 0 |
"""WavLM lazy-import module: defers heavy torch imports until first attribute access."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure['modeling_wavlm'] = [
        'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WavLMForAudioFrameClassification',
        'WavLMForCTC',
        'WavLMForSequenceClassification',
        'WavLMForXVector',
        'WavLMModel',
        'WavLMPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
# Enable INFO-level logging for the conversion script's console output.
logging.set_verbosity_info()
# Module-level logger (name obfuscated to `A`); not referenced elsewhere in this chunk.
A = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig (Swin-tiny backbone) matching `model_name`.

    Downloads the appropriate id2label mapping from the Hub and attaches the
    label metadata to the config. Named `get_maskformer_config` to match the
    call site in `convert_maskformer_checkpoint`.
    """
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    # JSON keys come back as strings; the original comprehension wrongly used the
    # model name as key — convert the actual key `k` to int instead.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping original MaskFormer checkpoint
    names to the Hugging Face implementation's names.

    Covers the Swin backbone stem/stages, the FPN pixel decoder, the
    transformer decoder and the classification/mask heads. Named
    `create_rename_keys` to match the call site in `convert_maskformer_checkpoint`.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight'))
    rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias'))
    rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight'))
    rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias'))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'))
            rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'))
        # the last stage has no downsampling layer
        if i < 3:
            rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight'))
            rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight'))
            rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias'))
        rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight'))
        rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias'))

    # FPN
    rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight'))
    rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight'))
    rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias'))
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight'))
        rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight'))
        rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias'))
        rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight'))
        rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight'))
        rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias'))
    rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight'))
    rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias'))

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers):
        # self-attention out projection
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias'))
        # cross-attention out projection
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias'))
        # MLP 1
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias'))
        # MLP 2
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias'))
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias'))
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias'))
        # layernorm 3 (final layernorm)
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight'))
        rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias'))
    rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight'))
    rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias'))

    # heads on top
    rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight'))
    rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight'))
    rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias'))
    rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight'))
    rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias'))
    for i in range(3):
        rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight'))
        rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias'))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in `dct`, in place.

    The original had duplicate (obfuscated) parameter names and never stored
    the popped value back, which silently dropped every renamed weight.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each Swin fused qkv projection into separate q/k/v entries, in place.

    The original assigned the slices to dead locals instead of `state_dict`
    keys; restored the standard HF Swin target key names.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split decoder fused in_proj weights into q/k/v entries, in place.

    Handles both self-attention and cross-attention (`multihead_attn` ->
    `encoder_attn`) for each transformer decoder layer. The original assigned
    slices to dead locals; it also mixed `config.hidden_size` with the local
    `hidden_size` — normalized to the decoder hidden size throughout.
    """
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # `stream=True` lets PIL read from the raw response (original referenced an
    # undefined name here).
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Convert an original MaskFormer pickle checkpoint into the HF format.

    Loads the pickled state dict, renames/splits its keys, loads them into a
    `MaskFormerForInstanceSegmentation`, verifies the logits on a test image,
    then optionally saves and/or pushes the model + image processor.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)

    print('Logits:', outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
    # NOTE(review): `expected_logits` is only defined for the tiny-ade checkpoint;
    # other model names will raise NameError here, matching the original script.
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f'nielsr/{model_name}')
        image_processor.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        # the original passed a 1-tuple here; argparse expects a plain string
        help='Name of the MaskFormer model you\'d like to convert',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
        type=str,
        help='Path to the original state dict (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 449 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LED (slow and fast tokenizers).

    Restored: the mixin base class (`A__` was undefined; the file imports
    `TokenizerTesterMixin`), distinct method names (all were `__lowercase`,
    so later definitions silently overwrote earlier ones), the class
    attributes the mixin reads, and removal of dataset residue fused onto
    the final line.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            # user-provided global attention mask must be padded alongside input_ids
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger for this script.
lowerCamelCase : Any = logging.getLogger(__name__)
# main() refers to the logger as `logger`; the auto-generated rename left only
# `lowerCamelCase`, making `logger` undefined — bind both names.
logger = lowerCamelCase

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav, max_length, sample_rate=16_000):
    """Randomly cut `wav` down to at most `max_length` seconds.

    Args:
        wav: 1-D audio sequence (list or array of samples).
        max_length: maximum clip length in seconds.
        sample_rate: samples per second used to convert seconds to samples.

    Returns:
        `wav` unchanged when it is already short enough, otherwise a random
        contiguous slice of exactly ``round(sample_rate * max_length)`` samples.

    NOTE(review): restored from an auto-generated rename — the bindings were
    `UpperCamelCase__` while the reads used `sample_length` / `random_offset`,
    and the call site in main() uses the keywords `max_length=` / `sample_rate=`.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


# Backward-compatible alias for the previous auto-generated name
# (it is shadowed again further down, exactly as in the original file).
SCREAMING_SNAKE_CASE = random_subsample
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): restored from an auto-generated rename — every field was named
    `_lowerCAmelCase` (each overriding the previous) and defaults were the
    undefined name `A__`. Field names are taken from the read sites in main()
    (`data_args.dataset_name`, `data_args.train_split_name`, ...).
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature-extractor we fine-tune from.

    NOTE(review): restored from an auto-generated rename — fields were all named
    `_lowerCAmelCase` with undefined `A__` defaults, and the validation method was
    renamed to `__lowercase` so it never ran; it is clearly dataclass validation
    and is restored as `__post_init__`. Boolean defaults (True/False/None) are
    inferred from the deprecation logic below — confirm against the upstream script.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        """Warn about the deprecated flag; reject contradictory freeze settings."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    """Fine-tune a pretrained audio model for audio classification.

    Parses model/data/training arguments (CLI flags or a single JSON file), loads
    and preprocesses the dataset, builds the model, then runs the HuggingFace
    `Trainer` for training and/or evaluation, and finally writes a model card
    (optionally pushing to the Hub).

    NOTE(review): restored from an auto-generated rename — the function was named
    `SCREAMING_SNAKE_CASE` while the entry-point guard calls `main()`, every
    assignment bound `UpperCamelCase__` while the reads used the real names, and
    `fpaa` / `labelaid` / `idalabel` are digit-mangled `fp16` / `label2id` /
    `id2label`.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f'{", ".join(raw_datasets["train"].column_names )}.'
        )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f'{", ".join(raw_datasets["train"].column_names )}.'
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample each training clip, then run the feature extractor."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Run the feature extractor on full (un-subsampled) validation clips."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


# Backward-compatible alias for the previous auto-generated name.
SCREAMING_SNAKE_CASE = main


if __name__ == "__main__":
    main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. NOTE(review): in the auto-generated rename both the logger
# and the archive map were bound to `snake_case_`, so the logger was immediately
# shadowed by the dict below; each now gets its own name.
logger = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its config.json on the Hub.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}

# Preserve the original final binding of the auto-generated name (it ended up
# pointing at the archive map), so any external reference keeps working.
snake_case_ = RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP
class RwkvConfig(PretrainedConfig):
    """Configuration class for RWKV models: stores vocabulary/context sizes and layer widths.

    NOTE(review): restored from an auto-generated rename — the base class name
    (`lowercase__`) was undefined (``PretrainedConfig`` is what this file imports),
    both class attributes were named `__magic_name__` (so `model_type` was lost),
    and the constructor parameter names were mangled while the body read the real
    names.
    """

    model_type = "rwkv"
    # PretrainedConfig uses this map so `config.max_position_embeddings`
    # transparently reads `context_length`.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Optional sizes fall back to values derived from hidden_size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )


# Backward-compatible alias for the previous auto-generated class name.
a__ = RwkvConfig
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=DummyObject ):
    """Import-time placeholder used when the `sentencepiece` backend is unavailable:
    instantiating it raises a helpful error via `requires_backends`.

    NOTE(review): restored from an auto-generated rename — the metaclass name
    (`_lowercase`) was undefined (``DummyObject`` is what this file imports) and the
    backends attribute was renamed to `__magic_name__`, which `DummyObject` does not
    read. Upstream this file defines one dummy class per sentencepiece tokenizer,
    but the rename collapsed all of them to the single name `a__`, so only the last
    (and only surviving) definition is kept; the shadowed duplicates were dead code.
    """

    # Read by the DummyObject metaclass on attribute access.
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
# NOTE(review): removed trailing dataset-viewer boilerplate ("Subsets and Splits",
# "No community queries yet", ...) that was accidentally appended to this file —
# it is not Python and broke parsing.