| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCamelCase = get_logger(__name__)
class __UpperCAmelCase :
__snake_case : Dict = "dummy_data"
__snake_case : Optional[Any] = "datasets"
__snake_case : Any = False
def __init__( self: str , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Union[Version, str] , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: bool = False , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[List[Callable]] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = dataset_name
_SCREAMING_SNAKE_CASE = cache_dir
_SCREAMING_SNAKE_CASE = use_local_dummy_data
_SCREAMING_SNAKE_CASE = config
# download_callbacks take a single url as input
_SCREAMING_SNAKE_CASE = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_SCREAMING_SNAKE_CASE = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_SCREAMING_SNAKE_CASE = str(lowerCAmelCase_ )
# to be downloaded
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
if self._dummy_file is None:
_SCREAMING_SNAKE_CASE = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_SCREAMING_SNAKE_CASE = cached_path(
lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ )
return os.path.join(lowerCAmelCase_ , self.dummy_file_name )
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
if self._bucket_url is None:
_SCREAMING_SNAKE_CASE = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , *UpperCAmelCase_: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_SCREAMING_SNAKE_CASE = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_SCREAMING_SNAKE_CASE = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: int , *UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
return self.download_and_extract(lowerCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
return self.download_and_extract(lowerCAmelCase_ )
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[str, Any] , *UpperCAmelCase_: Tuple , **UpperCAmelCase_: Tuple ):
'''simple docstring'''
return path
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return {}
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for single_url in single_urls:
download_callback(lowerCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = single_urls
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls]
else:
_SCREAMING_SNAKE_CASE = single_urls
_SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) )
_SCREAMING_SNAKE_CASE = value
# make sure that values are unique
if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_SCREAMING_SNAKE_CASE = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_SCREAMING_SNAKE_CASE = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , lowerCAmelCase_ ) ) for url in data_url )
_SCREAMING_SNAKE_CASE = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_SCREAMING_SNAKE_CASE = [data_url[0]] * len(lowerCAmelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(lowerCAmelCase_ )
return dummy_data_list
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Dict ):
'''simple docstring'''
def _iter_archive_members(UpperCAmelCase_: Dict ):
# this preserves the order of the members inside the ZIP archive
_SCREAMING_SNAKE_CASE = Path(self.dummy_file ).parent
_SCREAMING_SNAKE_CASE = path.relative_to(lowerCAmelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_SCREAMING_SNAKE_CASE = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = Path(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("""rb""" )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE = [paths]
for path in paths:
if os.path.isfile(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(lowerCAmelCase_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
| 306 |
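The dummy-data manager above maps every real download URL to a file inside the dummy archive by keeping only the last path segment and percent-encoding it with urllib.parse.quote_plus. A minimal standalone sketch of that mapping (the helper name is illustrative, not part of the dataset code):

import os
import urllib.parse


def dummy_path_for_url(dummy_root: str, url: str) -> str:
    # e.g. "https://host/files/train.csv?rev=2" -> "<dummy_root>/train.csv%3Frev%3D2"
    return os.path.join(dummy_root, urllib.parse.quote_plus(url.split("/")[-1]))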
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        # read the image in grayscale and keep an untouched copy for display
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        # build a lookup table from the normalized cumulative histogram
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # remap every pixel through the lookup table built above
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 268 | 0 |
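The stretch routine above is, in essence, histogram equalization: it accumulates a normalized cumulative histogram and uses it as a per-intensity lookup table. A numpy-only sketch of that idea (illustrative, not the class above):

import numpy as np


def equalization_lut(img, levels=256):
    # normalized cumulative histogram -> lookup table mapping old level to new level
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()
    return np.round((levels - 1) * cdf).astype(np.uint8)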
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class a__ ( __A ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'umt5'
_SCREAMING_SNAKE_CASE : List[Any] = ['past_key_values']
def __init__( self , _UpperCamelCase=250112 , _UpperCamelCase=512 , _UpperCamelCase=64 , _UpperCamelCase=1024 , _UpperCamelCase=8 , _UpperCamelCase=None , _UpperCamelCase=6 , _UpperCamelCase=32 , _UpperCamelCase=128 , _UpperCamelCase=0.1 , _UpperCamelCase=1E-6 , _UpperCamelCase=1.0 , _UpperCamelCase="gated-gelu" , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase="T5Tokenizer" , _UpperCamelCase=True , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=0 , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase_ , tokenizer_class=lowerCAmelCase_ , tie_word_embeddings=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
_lowercase : int = vocab_size
_lowercase : Union[str, Any] = d_model
_lowercase : Tuple = d_kv
_lowercase : int = d_ff
_lowercase : Dict = num_layers
_lowercase : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowercase : str = num_heads
_lowercase : Dict = relative_attention_num_buckets
_lowercase : Dict = relative_attention_max_distance
_lowercase : int = dropout_rate
_lowercase : List[Any] = layer_norm_epsilon
_lowercase : List[str] = initializer_factor
_lowercase : Tuple = feed_forward_proj
_lowercase : List[Any] = use_cache
_lowercase : Optional[Any] = self.feed_forward_proj.split("-" )
_lowercase : str = act_info[-1]
_lowercase : str = act_info[0] == "gated"
if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
_lowercase : Union[str, Any] = "gelu_new"
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return self.d_model
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return self.num_heads
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return self.num_layers
class a__ ( __A ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
_lowercase : Dict = "past_encoder_sequence + sequence"
_lowercase : Union[str, Any] = {0: "batch"}
_lowercase : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowercase : Dict = {0: "batch", 1: "decoder_sequence"}
_lowercase : Optional[int] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _lowerCamelCase ( self ):
"""simple docstring"""
return 13
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return 5E-4
| 250 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Tuple = embeddings_size
UpperCAmelCase_ : Union[str, Any] = hidden_sizes
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : str = scope
UpperCAmelCase_ : str = len(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__magic_name__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : List[Any] = layer_type
UpperCAmelCase_ : int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ):
UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple()
def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
if isinstance(lowerCAmelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ):
UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
| 268 | 0 |
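The UMT5 config shown earlier derives its activation settings from the `feed_forward_proj` string ("gated-gelu", "relu", ...). A small standalone sketch of that parsing logic (the function name is illustrative):

def parse_feed_forward_proj(feed_forward_proj):
    # "gated-gelu" -> ("gelu_new", True); "relu" -> ("relu", False)
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
        raise ValueError(
            f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation; "
            "use the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 'gated-gelu' or 'relu'."
        )
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act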
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase_ = get_logger(__name__)
class UpperCamelCase_ :
__magic_name__ = '''dummy_data'''
__magic_name__ = '''datasets'''
__magic_name__ = False
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : int = dataset_name
UpperCAmelCase_ : Optional[int] = cache_dir
UpperCAmelCase_ : Tuple = use_local_dummy_data
UpperCAmelCase_ : int = config
# download_callbacks take a single url as input
UpperCAmelCase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase_ : Dict = str(lowerCAmelCase_ )
# to be downloaded
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : int = None
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
if self._dummy_file is None:
UpperCAmelCase_ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase_ : Union[str, Any] = cached_path(
lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ )
return os.path.join(lowerCAmelCase_ , self.dummy_file_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self._bucket_url is None:
UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase_ : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase_ : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return path
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return {}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for single_url in single_urls:
download_callback(lowerCAmelCase_ )
else:
UpperCAmelCase_ : Tuple = single_urls
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls]
else:
UpperCAmelCase_ : Optional[int] = single_urls
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) )
UpperCAmelCase_ : int = value
# make sure that values are unique
if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
UpperCAmelCase_ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url )
UpperCAmelCase_ : Union[str, Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(lowerCAmelCase_ )
return dummy_data_list
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
def _iter_archive_members(lowerCAmelCase_ : Dict ):
# this preserves the order of the members inside the ZIP archive
UpperCAmelCase_ : str = Path(self.dummy_file ).parent
UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCAmelCase_ : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : str = [paths]
for path in paths:
if os.path.isfile(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(lowerCAmelCase_ ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 0 |
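The `iter_files` helper at the end of the manager above skips anything whose name starts with "." or "__" (e.g. .DS_Store, __pycache__). The same filtering as a free-standing sketch (the function name is illustrative):

import os


def iter_visible_files(root):
    # walk a directory tree, yielding only non-hidden, non-dunder files in sorted order
    for dirpath, dirnames, filenames in os.walk(root):
        if os.path.basename(dirpath).startswith((".", "__")):
            continue
        dirnames.sort()
        for filename in sorted(filenames):
            if filename.startswith((".", "__")):
                continue
            yield os.path.join(dirpath, filename)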
def encrypt(input_string: str, key: int) -> str:
    """Rail fence cipher: write the characters along a zigzag of `key` rows,
    then read the rows left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuild the zigzag template for `key` rows, fill it with the ciphertext,
    then read it back in zigzag order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict:
    """Try every possible key and return the candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 232 |
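A quick, illustrative round-trip check for the rail-fence functions above (the message and key range are arbitrary):

message = "WE ARE DISCOVERED FLEE AT ONCE"
for key in range(2, 10):
    assert decrypt(encrypt(message, key), key) == message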
"""simple docstring"""
ROMAN = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """Convert a roman numeral to an integer, e.g. "XIV" -> 14."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction (e.g. "IV" -> 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral, e.g. 14 -> "XIV"."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 268 | 0 |
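A few illustrative sanity checks for the two converters above:

for value, numeral in [(3, "III"), (14, "XIV"), (1994, "MCMXCIV")]:
    assert int_to_roman(value) == numeral
    assert roman_to_int(numeral) == value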
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCAmelCase_ ( __A ):
UpperCAmelCase__ : Dict = "Wav2Vec2FeatureExtractor"
UpperCAmelCase__ : Optional[int] = "AutoTokenizer"
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str:
super().__init__(lowerCAmelCase_, lowerCAmelCase_ )
UpperCamelCase : List[Any] = self.feature_extractor
UpperCamelCase : int = False
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
try:
return super().from_pretrained(lowerCAmelCase_, **lowerCAmelCase_ )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ', lowerCAmelCase_, )
UpperCamelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_, **lowerCAmelCase_ )
UpperCamelCase : str = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase_, **lowerCAmelCase_ )
return cls(feature_extractor=lowerCAmelCase_, tokenizer=lowerCAmelCase_ )
def __call__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_, **lowerCAmelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
UpperCamelCase : Union[str, Any] = kwargs.pop('raw_speech' )
else:
UpperCamelCase : List[str] = kwargs.pop('audio', lowerCAmelCase_ )
UpperCamelCase : Optional[int] = kwargs.pop('sampling_rate', lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = kwargs.pop('text', lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
UpperCamelCase : Optional[int] = args[0]
UpperCamelCase : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
UpperCamelCase : Dict = self.feature_extractor(lowerCAmelCase_, *lowerCAmelCase_, sampling_rate=lowerCAmelCase_, **lowerCAmelCase_ )
if text is not None:
UpperCamelCase : Optional[int] = self.tokenizer(lowerCAmelCase_, **lowerCAmelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCamelCase : Tuple = encodings["input_ids"]
return inputs
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCAmelCase_, **lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = kwargs.pop('input_features', lowerCAmelCase_ )
UpperCamelCase : List[str] = kwargs.pop('labels', lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
UpperCamelCase : Any = args[0]
UpperCamelCase : List[str] = args[1:]
if input_features is not None:
UpperCamelCase : List[Any] = self.feature_extractor.pad(lowerCAmelCase_, *lowerCAmelCase_, **lowerCAmelCase_ )
if labels is not None:
UpperCamelCase : Any = self.tokenizer.pad(lowerCAmelCase_, **lowerCAmelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCamelCase : List[str] = labels["input_ids"]
return input_features
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
return self.tokenizer.batch_decode(*lowerCAmelCase_, **lowerCAmelCase_ )
def snake_case_ ( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
return self.tokenizer.decode(*lowerCAmelCase_, **lowerCAmelCase_ )
@contextmanager
def snake_case_ ( self ) -> int:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
UpperCamelCase : str = True
UpperCamelCase : Optional[int] = self.tokenizer
yield
UpperCamelCase : str = self.feature_extractor
UpperCamelCase : str = False
| 119 |
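For context, typical use of a speech processor with the audio/text call pattern implemented above looks roughly like this (the checkpoint name and the silent audio array are placeholders, not taken from the snippet):

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # one second of "silence" at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="hello world", return_tensors="pt").input_ids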
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ (__A ):
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Optional[Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : int = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : List[str] = relative_attention
UpperCAmelCase_ : List[Any] = position_biased_input
UpperCAmelCase_ : Dict = pos_att_type
UpperCAmelCase_ : Optional[Any] = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Tuple = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.get_config()
UpperCAmelCase_ : int = 300
return config
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str:
UpperCAmelCase_ : Optional[int] = self.num_labels
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]:
UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Any = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Tuple = config_and_inputs
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : int = DebertaModelTester(self )
UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ (unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 268 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 174 |
"""simple docstring"""
import os
def solution():
    """Greatest product of four adjacent numbers (right, down, or either diagonal)
    in the 20x20 grid stored in grid.txt next to this file."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
| 268 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 187 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def snake_case ( A__ ):
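# Build a SwinConfig for the 192x192 SimMIM checkpoints; only the "base" and "large" variants are recognised.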
UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 )
if "base" in model_name:
UpperCAmelCase_ : Any = 6
UpperCAmelCase_ : Optional[Any] = 1_28
UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : int = 1_92
UpperCAmelCase_ : List[Any] = (2, 2, 18, 2)
UpperCAmelCase_ : int = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
UpperCAmelCase_ : str = window_size
UpperCAmelCase_ : Any = embed_dim
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Any = num_heads
return config
def snake_case ( A__ ):
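# Translate a key name from the original SimMIM state dict into the corresponding Hugging Face Swin module name.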
if "encoder.mask_token" in name:
UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" )
if "attn.proj" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" )
if "norm1" in name:
UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" )
if name == "encoder.norm.weight":
UpperCAmelCase_ : List[str] = "layernorm.weight"
if name == "encoder.norm.bias":
UpperCAmelCase_ : int = "layernorm.bias"
if "decoder" in name:
pass
else:
UpperCAmelCase_ : Any = "swin." + name
return name
def snake_case ( A__ ,A__ ):
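# Remap every key of the original state dict, splitting the fused qkv projection weights and biases into separate query, key and value tensors.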
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase_ : Optional[int] = key.split("." )
UpperCAmelCase_ : str = int(key_split[2] )
UpperCAmelCase_ : Union[str, Any] = int(key_split[4] )
UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ : List[Any] = val[:dim, :]
UpperCAmelCase_ : str = val[
dim : dim * 2, :
]
UpperCAmelCase_ : str = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[
:dim
]
UpperCAmelCase_ : str = val[
dim : dim * 2
]
UpperCAmelCase_ : Optional[Any] = val[
-dim:
]
else:
UpperCAmelCase_ : Tuple = val
return orig_state_dict
def snake_case ( A__ ,A__ ,A__ ,A__ ):
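# Load the original SimMIM checkpoint, remap its keys into a SwinForMaskedImageModeling model, run a sanity-check forward pass on a COCO image, and optionally save or push the result.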
UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"]
UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ )
UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ )
model.eval()
UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ )
model.load_state_dict(A__ )
UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} )
UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw )
UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" )
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**A__ ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 268 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ : str = logging.getLogger()
def UpperCAmelCase_ ( ) -> Dict:
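# Return the value passed via the -f command-line flag, if any.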
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('-f' )
SCREAMING_SNAKE_CASE_ = parser.parse_args()
return args.f
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Tuple:
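# Load the all_results.json file written by the example script into a dict; raise if it is missing.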
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = os.path.join(A__ , 'all_results.json' )
if os.path.exists(A__ ):
with open(A__ , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(A__ )
else:
raise ValueError(f"can't find {path}" )
return results
def UpperCAmelCase_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
lowerCamelCase__ : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( __A ):
'''simple docstring'''
@classmethod
def lowerCAmelCase_ ( cls : Tuple ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE_ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def lowerCAmelCase_ ( cls : Tuple ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model; it would also need drop_last to work.
return
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : Tuple ):
# With so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
SCREAMING_SNAKE_CASE_ = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'translation_no_trainer' ) ) )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE_ = get_results(lowerCAmelCase_ )
# The base model scores 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'image_classification_no_trainer' ) ) ) | 225 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''rwkv'''
__magic_name__ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : List[str] = context_length
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[Any] = rescale_every
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[str] = bos_token_id
UpperCAmelCase_ : Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
| 268 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 268 | 0 |
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple = False ) -> List[str]:
'''simple docstring'''
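# Miller-Rabin primality test: deterministic for n below 3,317,044,064,679,887,385,961,981; above that bound the result is only probabilistic and requires allow_probable=True.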
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
__UpperCAmelCase : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
__UpperCAmelCase : int = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(A__ , 1 ):
if n < _p:
# then we have our last prime to check
__UpperCAmelCase : int = primes[:idx]
break
__UpperCAmelCase : Tuple = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__UpperCAmelCase : Tuple = False
for r in range(A__ ):
__UpperCAmelCase : Optional[Any] = pow(A__ , d * 2**r , A__ )
# see the article's analysis for an explanation of m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__UpperCAmelCase : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and n MUST be composite
return False
return True
def lowerCamelCase ( ) -> Any:
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 115 |
"""simple docstring"""
from __future__ import annotations
class UpperCamelCase_ :
def __init__( self : Any , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : Any = data
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def snake_case ( A__ ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def snake_case ( A__ ):
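# Depth of the tree: one more than the deeper of the two subtrees, with an empty tree counting as depth 0.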
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def snake_case ( A__ ):
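# A binary tree is full when every node has either zero or two children.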
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def snake_case ( ): # Main function for testing.
UpperCAmelCase_ : List[str] = Node(1 )
UpperCAmelCase_ : Any = Node(2 )
UpperCAmelCase_ : Optional[Any] = Node(3 )
UpperCAmelCase_ : Union[str, Any] = Node(4 )
UpperCAmelCase_ : int = Node(5 )
UpperCAmelCase_ : Optional[int] = Node(6 )
UpperCAmelCase_ : Any = Node(7 )
UpperCAmelCase_ : List[str] = Node(8 )
UpperCAmelCase_ : List[Any] = Node(9 )
print(is_full_binary_tree(A__ ) )
print(depth_of_tree(A__ ) )
print("Tree is: " )
display(A__ )
if __name__ == "__main__":
main()
| 268 | 0 |
from collections import namedtuple
lowercase__ : Any = namedtuple('''from_to''', '''from_ to''')
lowercase__ : Optional[Any] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1_0_0_0),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
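# Convert a volume between the supported units using the factors in METRIC_CONVERSION (cubic metres act as the intermediate unit).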
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ''', '''.join(A__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ''', '''.join(A__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 |
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
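# Naive recursive 0/1 knapsack: at each index, take the better of skipping the item or including it when it still fits in the remaining weight.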
if index == number_of_items:
return 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack(
A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 )
return max(A__ ,A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
_SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ ,stream=A__ ).raw ).convert("""RGB""" )
return image
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(A__ )
_SCREAMING_SNAKE_CASE = val
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Dict:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_SCREAMING_SNAKE_CASE = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
_SCREAMING_SNAKE_CASE = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(A__ ,requires_grad=A__ ), v_bias) )
_SCREAMING_SNAKE_CASE = qkv_bias
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 3_64 if "coco" in model_name else 2_24
_SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=A__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained("""facebook/opt-2.7b""" ,eos_token_id=A__ ).to_dict()
elif "opt-6.7b" in model_name:
_SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained("""facebook/opt-6.7b""" ,eos_token_id=A__ ).to_dict()
elif "t5-xl" in model_name:
_SCREAMING_SNAKE_CASE = TaConfig.from_pretrained("""google/flan-t5-xl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_SCREAMING_SNAKE_CASE = TaConfig.from_pretrained("""google/flan-t5-xxl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
_SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=A__ ,text_config=A__ )
return config, image_size
@torch.no_grad()
def __lowerCamelCase ( snake_case__ ,snake_case__=None ,snake_case__=False ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_SCREAMING_SNAKE_CASE = tokenizer("""\n""" ,add_special_tokens=A__ ).input_ids[0]
_SCREAMING_SNAKE_CASE = get_blipa_config(A__ ,eos_token_id=A__ )
_SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(A__ ).eval()
_SCREAMING_SNAKE_CASE = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
_SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_SCREAMING_SNAKE_CASE = "cuda" if torch.cuda.is_available() else "cpu"
_SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=A__ ,model_type=A__ ,is_eval=A__ ,device=A__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
_SCREAMING_SNAKE_CASE = original_model.state_dict()
_SCREAMING_SNAKE_CASE = create_rename_keys(A__ )
for src, dest in rename_keys:
rename_key(A__ ,A__ ,A__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_SCREAMING_SNAKE_CASE = state_dict.pop(A__ )
if key.startswith("""Qformer.bert""" ):
_SCREAMING_SNAKE_CASE = key.replace("""Qformer.bert""" ,"""qformer""" )
if "attention.self" in key:
_SCREAMING_SNAKE_CASE = key.replace("""self""" ,"""attention""" )
if "opt_proj" in key:
_SCREAMING_SNAKE_CASE = key.replace("""opt_proj""" ,"""language_projection""" )
if "t5_proj" in key:
_SCREAMING_SNAKE_CASE = key.replace("""t5_proj""" ,"""language_projection""" )
if key.startswith("""opt""" ):
_SCREAMING_SNAKE_CASE = key.replace("""opt""" ,"""language""" )
if key.startswith("""t5""" ):
_SCREAMING_SNAKE_CASE = key.replace("""t5""" ,"""language""" )
_SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(A__ ,A__ )
_SCREAMING_SNAKE_CASE = hf_model.load_state_dict(A__ ,strict=A__ )
assert len(A__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_SCREAMING_SNAKE_CASE = load_demo_image()
_SCREAMING_SNAKE_CASE = vis_processors["eval"](A__ ).unsqueeze(0 ).to(A__ )
_SCREAMING_SNAKE_CASE = tokenizer(["""\n"""] ,return_tensors="""pt""" ).input_ids.to(A__ )
# create processor
_SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} ,image_mean=A__ ,image_std=A__ )
_SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=A__ ,tokenizer=A__ )
_SCREAMING_SNAKE_CASE = processor(images=A__ ,return_tensors="""pt""" ).pixel_values.to(A__ )
# make sure processor creates exact same pixel values
assert torch.allclose(A__ ,A__ )
original_model.to(A__ )
hf_model.to(A__ )
with torch.no_grad():
if "opt" in model_name:
_SCREAMING_SNAKE_CASE = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_SCREAMING_SNAKE_CASE = hf_model(A__ ,A__ ).logits
else:
_SCREAMING_SNAKE_CASE = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-1_00 )
_SCREAMING_SNAKE_CASE = hf_model(A__ ,A__ ,labels=A__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" ,original_logits[0, :3, :3] )
print("""First values of HF logits:""" ,logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] ,device=A__ )
assert torch.allclose(logits[0, :3, :3] ,A__ ,atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] ,device=A__ )
else:
# cast to same type
_SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(A__ ) ,A__ ,atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_SCREAMING_SNAKE_CASE = ""
_SCREAMING_SNAKE_CASE = tokenizer(A__ ,return_tensors="""pt""" ).input_ids.to(A__ )
_SCREAMING_SNAKE_CASE = original_model.generate({"""image""": original_pixel_values} )
_SCREAMING_SNAKE_CASE = hf_model.generate(
A__ ,A__ ,do_sample=A__ ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print("""Original generation:""" ,A__ )
_SCREAMING_SNAKE_CASE = input_ids.shape[1]
_SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print("""HF generation:""" ,A__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A__ )
hf_model.save_pretrained(A__ )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
UpperCamelCase = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCamelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 306 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (__A ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = proj_size
UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ )
UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
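# Encode the example image with the CLIP vision model, run the pooled features through the mapper blocks, normalise, and project to the conditioning size; optionally also return the learned unconditional vector.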
UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output
UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase_ (nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Optional[Any] = config.hidden_size
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ )
] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str:
for block in self.blocks:
UpperCAmelCase_ : int = block(lowerCAmelCase_ )
return hidden_states
| 268 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_snake_case = '\\n Text data.\n Second line of data.'
_snake_case = 'file'
@pytest.fixture(scope="session" )
def _A ( snake_case ) -> List[Any]:
_lowercase : List[Any] = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowercase : str = bytes(A__ , "utf-8" )
with zstd.open(A__ , "wb" ) as f:
f.write(A__ )
return path
@pytest.fixture
def _A ( snake_case ) -> Optional[Any]:
with open(os.path.join(tmpfs.local_root_dir , A__ ) , "w" ) as f:
f.write(A__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def _A ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
_lowercase : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowercase : List[Any] = input_paths[compression_format]
_lowercase : int = tmp_path / "cache"
_lowercase : Optional[int] = DownloadConfig(cache_dir=A__ , extract_compressed_file=A__ )
_lowercase : str = cached_path(A__ , download_config=A__ )
with open(A__ ) as f:
_lowercase : Any = f.read()
with open(A__ ) as f:
_lowercase : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def _A ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
_lowercase : Optional[Any] = "custom_cache"
_lowercase : str = "custom_extracted_dir"
_lowercase : Dict = tmp_path / "custom_extracted_path"
if default_extracted:
_lowercase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , A__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(A__ ) )
_lowercase : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowercase : str = xz_file
_lowercase : Optional[Any] = (
DownloadConfig(extract_compressed_file=A__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=A__ )
)
_lowercase : Tuple = cached_path(A__ , download_config=A__ )
assert Path(A__ ).parent.parts[-2:] == expected
def _A ( snake_case ) -> Any:
# absolute path
_lowercase : Union[str, Any] = str(Path(A__ ).resolve() )
assert cached_path(A__ ) == text_file
# relative path
_lowercase : Dict = str(Path(A__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(A__ ) == text_file
def _A ( snake_case ) -> int:
# absolute path
_lowercase : Dict = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(A__ ):
cached_path(A__ )
# relative path
_lowercase : Any = "./__missing_file__.txt"
with pytest.raises(A__ ):
cached_path(A__ )
def _A ( snake_case ) -> Any:
_lowercase : Union[str, Any] = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(A__ ) as f:
_lowercase : Optional[Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , A__ )
def _A ( ) -> Optional[Any]:
with pytest.raises(A__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , A__ )
def _A ( snake_case ) -> Optional[Any]:
_lowercase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(A__ ):
http_get("https://huggingface.co" , temp_file=A__ )
with pytest.raises(A__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , A__ )
def _A ( snake_case ) -> Optional[int]:
_lowercase : str = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(A__ ):
ftp_get("ftp://huggingface.co" , temp_file=A__ )
with pytest.raises(A__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , A__ )
def _A ( snake_case ) -> Optional[Any]:
_lowercase : List[str] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(A__ ):
fsspec_get("s3://huggingface.co" , temp_file=A__ )
with pytest.raises(A__ ):
fsspec_head("s3://huggingface.co" )
| 250 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCamelCase_ (__A ):
def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]:
UpperCAmelCase_ : str = {}
if top_k is not None:
UpperCAmelCase_ : List[str] = top_k
return {}, {}, postprocess_params
def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any:
UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ )
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any:
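# Softmax the logits (PyTorch or TensorFlow) and return the top_k labels with their scores.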
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : int = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ )
elif self.framework == "tf":
UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCAmelCase_ : int = scores.tolist()
UpperCAmelCase_ : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
| 268 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''detr'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : str = use_timm_backbone
UpperCAmelCase_ : Optional[Any] = backbone_config
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Dict = num_queries
UpperCAmelCase_ : str = d_model
UpperCAmelCase_ : Any = encoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Optional[int] = encoder_attention_heads
UpperCAmelCase_ : List[str] = decoder_ffn_dim
UpperCAmelCase_ : Tuple = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[Any] = dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : List[str] = activation_function
UpperCAmelCase_ : Optional[int] = init_std
UpperCAmelCase_ : Union[str, Any] = init_xavier_std
UpperCAmelCase_ : List[str] = encoder_layerdrop
UpperCAmelCase_ : Tuple = decoder_layerdrop
UpperCAmelCase_ : str = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : Optional[int] = position_embedding_type
UpperCAmelCase_ : List[str] = backbone
UpperCAmelCase_ : int = use_pretrained_backbone
UpperCAmelCase_ : Any = dilation
# Hungarian matcher
UpperCAmelCase_ : str = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : List[str] = mask_loss_coefficient
UpperCAmelCase_ : Dict = dice_loss_coefficient
UpperCAmelCase_ : Any = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self.d_model
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]:
return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]:
UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return 12
| 268 | 0 |
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any]) -> List[str]:
'''simple docstring'''
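# Exchange sort: compare each element with every later element and exchange out-of-order pairs (quadratic time).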
__UpperCamelCase : int = len(A__)
for i in range(A__):
for j in range(i + 1 , A__):
if numbers[j] < numbers[i]:
__UpperCamelCase : Tuple = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowercase : str = input('Enter numbers separated by a comma:\n').strip()
lowercase : Any = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted)) | 232 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (__A , unittest.TestCase ):
__magic_name__ = BertTokenizer
__magic_name__ = BertTokenizerFast
__magic_name__ = True
__magic_name__ = True
__magic_name__ = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running"
UpperCAmelCase_ : Any = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = BasicTokenizer()
UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't."
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ : Tuple = {}
for i, token in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = i
UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
UpperCAmelCase_ : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ["的", "人", "有"]
UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ : Tuple = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 0 |
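The `WordpieceTokenizer` assertions above (for example, "unwanted" becoming ["un", "##want", "##ed"] and "unwantedX" collapsing to "[UNK]") exercise greedy longest-match-first subword segmentation. A simplified sketch of that idea, not the library implementation:

def wordpiece_tokenize(word: str, vocab: set[str], unk_token: str = "[UNK]") -> list[str]:
    # Greedy longest-match-first: repeatedly take the longest prefix found in the vocab,
    # prefixing continuation pieces with "##"; if no prefix matches, the whole word becomes unk_token.
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        current = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk_token]
        tokens.append(current)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]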
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # Requires nums to be sorted in non-decreasing order.
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 119 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''t5'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int:
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[Any] = d_model
UpperCAmelCase_ : str = d_kv
UpperCAmelCase_ : Any = d_ff
UpperCAmelCase_ : int = num_layers
UpperCAmelCase_ : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Optional[Any] = num_heads
UpperCAmelCase_ : Any = relative_attention_num_buckets
UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance
UpperCAmelCase_ : Optional[Any] = dropout_rate
UpperCAmelCase_ : Tuple = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_factor
UpperCAmelCase_ : int = feed_forward_proj
UpperCAmelCase_ : str = use_cache
UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" )
UpperCAmelCase_ : List[Any] = act_info[-1]
UpperCAmelCase_ : Optional[int] = act_info[0] == "gated"
if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : int = "gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : Any = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence"
UpperCAmelCase_ : Union[str, Any] = {0: "batch"}
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" )
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return 13
| 268 | 0 |
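The constructor above derives the activation setup from the `feed_forward_proj` string, splitting values such as "gated-gelu" into a gating flag and an activation name. The same parsing logic, pulled out into a small standalone function for clarity:

def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    # "relu" -> ("relu", False); "gated-gelu" -> ("gelu_new", True) for backwards compatibility.
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
        raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function.")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act

assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)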
'''simple docstring'''
from __future__ import annotations
_UpperCAmelCase : Tuple = list[tuple[int, int]]
_UpperCAmelCase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCAmelCase : Union[str, Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
__lowerCAmelCase = pos_x
__lowerCAmelCase = pos_y
__lowerCAmelCase = (pos_y, pos_x)
__lowerCAmelCase = goal_x
__lowerCAmelCase = goal_y
__lowerCAmelCase = g_cost
__lowerCAmelCase = parent
__lowerCAmelCase = self.calculate_heuristic()
def _snake_case (self ):
__lowerCAmelCase = abs(self.pos_x - self.goal_x )
__lowerCAmelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__(self , __lowercase ):
return self.f_cost < other.f_cost
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase ):
__lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ )
__lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCAmelCase_ )
__lowerCAmelCase = [self.start]
__lowerCAmelCase = []
__lowerCAmelCase = False
def _snake_case (self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase = True
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
if not self.reached:
return [self.start.pos]
return None
def _snake_case (self , __lowercase ):
__lowerCAmelCase = []
for action in delta:
__lowerCAmelCase = parent.pos_x + action[1]
__lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def _snake_case (self , __lowercase ):
__lowerCAmelCase = node
__lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = (0, 0)
_UpperCAmelCase : Optional[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
_UpperCAmelCase : Union[str, Any] = GreedyBestFirst(init, goal)
_UpperCAmelCase : Optional[int] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_UpperCAmelCase : str = 2
for elem in grid:
print(elem)
| 174 |
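The class above keeps the open list sorted and always expands the node with the lowest heuristic cost (Manhattan distance to the goal). A compact sketch of the same greedy best-first idea using a heap; the grid encoding (0 = free, 1 = obstacle) matches the example above, while function and variable names are chosen here for readability:

import heapq

def greedy_best_first(grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]):
    # Always expand the free cell with the smallest Manhattan distance to the goal.
    def heuristic(pos):
        return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

    frontier = [(heuristic(start), start)]
    came_from = {start: None}
    while frontier:
        _, current = heapq.heappop(frontier)
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        y, x = current
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0 and (ny, nx) not in came_from:
                came_from[(ny, nx)] = current
                heapq.heappush(frontier, (heuristic((ny, nx)), (ny, nx)))
    return None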
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase_ :
# setable values
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None # sigma(t_i)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]:
return cls()
@dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
class UpperCamelCase_ (__A , __A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return True
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return KarrasVeSchedulerState.create()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState:
UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
UpperCAmelCase_ : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 )
UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma
UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : str = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
raise NotImplementedError()
| 268 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase__ : List[Any] = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = ["LayoutLMv3FeatureExtractor"]
lowercase__ : Optional[int] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
lowercase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 187 |
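The import structure above guards each group of exports behind an availability check so that missing optional backends do not break the module import. A tiny sketch of the same guarded-export idea using only the standard library (module and symbol names are placeholders):

import importlib.util

def optional_exports(package_name: str, symbols: list[str]) -> list[str]:
    # Expose the symbols only when their backing package is actually installed.
    return list(symbols) if importlib.util.find_spec(package_name) is not None else []

_export_list = ["LayoutLMv3Config", "LayoutLMv3Processor"]
_export_list += optional_exports("torch", ["LayoutLMv3Model"])
_export_list += optional_exports("tensorflow", ["TFLayoutLMv3Model"])
print(_export_list)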
"""simple docstring"""
def snake_case ( A__ ,A__ ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps
UpperCAmelCase_ : Optional[int] = boundary[0]
UpperCAmelCase_ : str = boundary[1]
UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ )
UpperCAmelCase_ : List[str] = 0.0
y += (h / 2.0) * f(A__ )
for i in x_i:
# print(i)
y += h * f(A__ )
y += (h / 2.0) * f(A__ )
return y
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Union[str, Any] = a + h
while x < (b - h):
yield x
UpperCAmelCase_ : Optional[Any] = x + h
def snake_case ( A__ ): # enter your function here
UpperCAmelCase_ : Dict = (x - 0) * (x - 0)
return y
def snake_case ( ):
UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration
UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration
UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution
UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration
UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ )
print(F"""y = {y}""" )
if __name__ == "__main__":
main()
| 268 | 0 |
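A compact, self-contained sketch of the extended trapezoidal rule computed above, for a generic integrand f on [a, b] with n steps:

def trapezoidal(f, a: float, b: float, n: int) -> float:
    # int_a^b f(x) dx ~ h * (f(a)/2 + f(a+h) + ... + f(b-h) + f(b)/2), with h = (b - a) / n
    h = (b - a) / n
    total = 0.5 * (f(a) + f(b))
    for i in range(1, n):
        total += f(a + i * h)
    return h * total

# x^2 on [0, 1] integrates to 1/3; with 10 steps the trapezoidal estimate is 0.335.
print(trapezoidal(lambda x: x * x, 0.0, 1.0, 10))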
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> List[Any]:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_ = k.replace(A__ , A__ )
if k.startswith('encoder' ):
SCREAMING_SNAKE_CASE_ = k.replace('.attn' , '.self_attn' )
SCREAMING_SNAKE_CASE_ = k.replace('norm1' , 'self_attn_layer_norm' )
SCREAMING_SNAKE_CASE_ = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
SCREAMING_SNAKE_CASE_ = k.replace('norm1' , 'self_attn_layer_norm' )
SCREAMING_SNAKE_CASE_ = k.replace('norm2' , 'encoder_attn_layer_norm' )
SCREAMING_SNAKE_CASE_ = k.replace('norm3' , 'final_layer_norm' )
return k
def UpperCAmelCase_ ( __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
SCREAMING_SNAKE_CASE_ = sd.pop(A__ )
SCREAMING_SNAKE_CASE_ = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
SCREAMING_SNAKE_CASE_ = v
lowerCamelCase__ : Dict = ['START']
@torch.no_grad()
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = torch.load(A__ , map_location='cpu' )
SCREAMING_SNAKE_CASE_ = model["model"]
SCREAMING_SNAKE_CASE_ = BlenderbotConfig.from_json_file(A__ )
SCREAMING_SNAKE_CASE_ = BlenderbotForConditionalGeneration(A__ )
SCREAMING_SNAKE_CASE_ = m.model.state_dict().keys()
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
SCREAMING_SNAKE_CASE_ = rename_state_dict_key(A__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
SCREAMING_SNAKE_CASE_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A__ )
m.model.load_state_dict(A__ , strict=A__ )
m.half()
m.save_pretrained(A__ )
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
lowerCamelCase__ : Optional[int] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 225 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def snake_case ( A__ ,A__ ,A__ ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(A__ ,A__ ,A__ )
    right_low, right_high, right_sum = max_subarray(A__ ,mid + 1 ,A__ )
    cross_left, cross_right, cross_sum = max_cross_sum(A__ ,A__ ,A__ ,A__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def snake_case ( A__ ,A__ ,A__ ,A__ ):
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(A__ ,low - 1 ,-1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 ,high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
return max_left, max_right, (left_sum + right_sum)
def snake_case ( A__ ):
UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )]
UpperCAmelCase_ : str = time.time()
max_subarray(A__ ,0 ,input_size - 1 )
UpperCAmelCase_ : int = time.time()
return end - start
def snake_case ( ):
UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(A__ ,A__ ):
print(A__ ,"\t\t" ,A__ )
plt.plot(A__ ,A__ )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 268 | 0 |
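A cleaner sketch of the divide-and-conquer maximum-subarray routine benchmarked above; it returns the bounds of the best run together with its sum and runs in O(n log n):

def max_subarray(arr: list[int], low: int, high: int) -> tuple[int, int, int]:
    # Best run is entirely in the left half, entirely in the right half, or crosses the midpoint.
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left = max_subarray(arr, low, mid)
    right = max_subarray(arr, mid + 1, high)
    cross = max_cross_sum(arr, low, mid, high)
    return max(left, right, cross, key=lambda t: t[2])

def max_cross_sum(arr: list[int], low: int, mid: int, high: int) -> tuple[int, int, int]:
    # Best run that starts at or before mid and ends after mid.
    left_sum, max_left, total = float("-inf"), mid, 0
    for i in range(mid, low - 1, -1):
        total += arr[i]
        if total > left_sum:
            left_sum, max_left = total, i
    right_sum, max_right, total = float("-inf"), mid + 1, 0
    for i in range(mid + 1, high + 1):
        total += arr[i]
        if total > right_sum:
            right_sum, max_right = total, i
    return max_left, max_right, left_sum + right_sum

assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)[2] == 6

For comparison, Kadane's single-pass scan gives the same maximum sum in O(n); the benchmark above is specifically about the divide-and-conquer variant.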
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list:
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    top = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == top})
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 |
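The same multimode result can be obtained more directly with the standard library's collections.Counter; a short equivalent sketch:

from collections import Counter

def modes(values: list) -> list:
    # Keep every value whose count equals the maximal count, sorted for a stable result.
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(value for value, count in counts.items() if count == top)

assert modes([1, 2, 2, 3, 3]) == [2, 3]
assert modes([2, 2, 3]) == [2]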
"""simple docstring"""
from __future__ import annotations
import time
lowerCamelCase_ = list[tuple[int, int]]
lowerCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase_ :
def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict:
UpperCAmelCase_ : Any = pos_x
UpperCAmelCase_ : str = pos_y
UpperCAmelCase_ : int = (pos_y, pos_x)
UpperCAmelCase_ : int = goal_x
UpperCAmelCase_ : Tuple = goal_y
UpperCAmelCase_ : Union[str, Any] = parent
class UpperCamelCase_ :
def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple:
UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ )
UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = [self.start]
UpperCAmelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None:
while self.node_queue:
UpperCAmelCase_ : str = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCAmelCase_ : Optional[Any] = True
return self.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ )
for node in successors:
self.node_queue.append(lowerCAmelCase_ )
if not self.reached:
return [self.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]:
UpperCAmelCase_ : List[str] = []
for action in delta:
UpperCAmelCase_ : List[Any] = parent.pos_x + action[1]
UpperCAmelCase_ : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) )
return successors
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path:
UpperCAmelCase_ : Union[str, Any] = node
UpperCAmelCase_ : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Tuple = current_node.parent
path.reverse()
return path
class UpperCamelCase_ :
def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 )
UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCAmelCase_ : str = True
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = current_bwd_node
UpperCAmelCase_ : List[str] = current_fwd_node
UpperCAmelCase_ : Tuple = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCAmelCase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path:
UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase_ = (0, 0)
lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BreadthFirstSearch(init, goal)
lowerCamelCase_ = bfs.search()
lowerCamelCase_ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase_ = bd_bfs.search()
lowerCamelCase_ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 268 | 0 |
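One design note on the queues above: popping from the front of a Python list is O(n) per operation, so collections.deque is the usual choice for BFS frontiers. A minimal sketch of the same breadth-first loop with a deque (grid encoding as above, names chosen here for readability):

from collections import deque

def bfs(grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]):
    # Standard BFS over a 0/1 grid; returns the coordinates on a shortest path, or None.
    queue = deque([start])
    came_from = {start: None}
    while queue:
        current = queue.popleft()  # O(1), unlike list.pop(0)
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        y, x = current
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0 and (ny, nx) not in came_from:
                came_from[(ny, nx)] = current
                queue.append((ny, nx))
    return None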
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __A , unittest.TestCase ):
"""simple docstring"""
__a = XGLMTokenizer
__a = XGLMTokenizerFast
__a = True
__a = True
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Tuple = XGLMTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = "<pad>"
__UpperCAmelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(lowerCAmelCase_ ) , 1_008 )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = XGLMTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
__UpperCAmelCase : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCAmelCase : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase_ , f.name )
__UpperCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase_ )
__UpperCAmelCase : List[str] = pickle.dumps(lowerCAmelCase_ )
pickle.loads(lowerCAmelCase_ )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : str = self.get_rust_tokenizer()
__UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé."
__UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase_ )
__UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__UpperCAmelCase : Dict = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
__UpperCAmelCase : Dict = tokenizer.encode(lowerCAmelCase_ )
__UpperCAmelCase : List[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = "Hello World!"
__UpperCAmelCase : Optional[Any] = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
__UpperCAmelCase : Optional[int] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Dict = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""facebook/xglm-564M""" , padding=lowerCAmelCase_ , )
| 115 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = MBartTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = vocab_file
UpperCAmelCase_ : str = False if not self.vocab_file else True
UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase_ : Tuple = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase_ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : List[str] = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ : str = src_lang
UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding:
UpperCAmelCase_ : List[Any] = src_lang
UpperCAmelCase_ : Tuple = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None:
UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : str = []
UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
| 268 | 0 |
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowercase_ ( __A ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 42
UpperCAmelCase_ : Any = None
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=0.9_99 , snake_case__="cosine" , ) -> Optional[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
lowerCAmelCase = []
for i in range(A__ ):
lowerCAmelCase = i / num_diffusion_timesteps
lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverse scheduler for DDIM: maps a (nearly) clean sample back towards noise."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self) -> int:
        return self.config.num_train_timesteps
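

# --- Editor's note: hedged usage sketch, not part of the original file. ------------
# DDIM inversion walks the timesteps forward (0 -> T); at each step the model's
# noise prediction pushes a latent towards pure noise. A zero tensor stands in for
# the UNet call here, and the helper name `_inversion_sketch` is illustrative only.
def _inversion_sketch(num_inference_steps: int = 10) -> torch.Tensor:
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
    scheduler.set_timesteps(num_inference_steps)
    latents = torch.randn(1, 4, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(latents)  # stand-in for unet(latents, t).sample
        latents = scheduler.step(noise_pred, int(t), latents).prev_sample
    return latents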
| 338 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
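

# --- Editor's note: hedged usage sketch, not part of the original file. ------------
# It only assumes the helper above (renamed here to `get_activation`) is in scope;
# the helper name `_activation_sketch` is illustrative.
def _activation_sketch() -> "torch.Tensor":
    import torch

    act = get_activation("silu")  # returns an nn.SiLU() module
    return act(torch.linspace(-1.0, 1.0, steps=5))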
| 268 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCAmelCase :
def __init__( self: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str]=2 , UpperCAmelCase_: int=3 , UpperCAmelCase_: List[Any]=4 , UpperCAmelCase_: Any=2 , UpperCAmelCase_: str=7 , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[int]=99 , UpperCAmelCase_: List[Any]=36 , UpperCAmelCase_: Optional[Any]=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Any=37 , UpperCAmelCase_: Dict="gelu" , UpperCAmelCase_: Optional[int]=0.1 , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: Any=512 , UpperCAmelCase_: int=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: List[Any]=0.02 , UpperCAmelCase_: List[Any]=6 , UpperCAmelCase_: Optional[Any]=6 , UpperCAmelCase_: Union[str, Any]=3 , UpperCAmelCase_: List[str]=4 , UpperCAmelCase_: Dict=None , UpperCAmelCase_: Optional[int]=1_000 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = text_seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = coordinate_size
_SCREAMING_SNAKE_CASE = shape_size
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_SCREAMING_SNAKE_CASE = text_seq_length
_SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1
_SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_SCREAMING_SNAKE_CASE = bbox[i, j, 3]
_SCREAMING_SNAKE_CASE = bbox[i, j, 1]
_SCREAMING_SNAKE_CASE = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_SCREAMING_SNAKE_CASE = bbox[i, j, 2]
_SCREAMING_SNAKE_CASE = bbox[i, j, 0]
_SCREAMING_SNAKE_CASE = t
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Dict , UpperCAmelCase_: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Dict , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = LayoutLMvaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# text + image
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_SCREAMING_SNAKE_CASE = model(pixel_values=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = LayoutLMvaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Any , UpperCAmelCase_: str , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Dict , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = LayoutLMvaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Tuple , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __UpperCAmelCase (__A ,__A ,unittest.TestCase ):
__snake_case : Tuple = False
__snake_case : int = False
__snake_case : Optional[Any] = False
__snake_case : Dict = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__snake_case : Union[str, Any] = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: Dict , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict ):
'''simple docstring'''
return True
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = LayoutLMvaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: int=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = copy.deepcopy(lowerCAmelCase_ )
if model_class in get_values(lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in get_values(lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in [
*get_values(lowerCAmelCase_ ),
]:
_SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
elif model_class in [
*get_values(lowerCAmelCase_ ),
]:
_SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase_ , )
return inputs_dict
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __UpperCAmelCase (unittest.TestCase ):
@cached_property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values.to(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] )
_SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_SCREAMING_SNAKE_CASE = model(
input_ids=input_ids.to(lowerCAmelCase_ ) , bbox=bbox.to(lowerCAmelCase_ ) , pixel_values=pixel_values.to(lowerCAmelCase_ ) , )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
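

# --- Editor's note: hedged usage sketch, not part of the original test file. -------
# Mirrors the integration test above using the public `transformers` processor API;
# the words/boxes and the fixture image path are illustrative assumptions.
def _layoutlmv3_sketch():
    import torch
    from PIL import Image
    from transformers import AutoModel, AutoProcessor

    processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").convert("RGB")
    words = ["hello", "world"]
    boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # boxes are expected on a 0-1000 scale
    encoding = processor(image, words, boxes=boxes, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**encoding)
    return outputs.last_hidden_state.shape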
| 306 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
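

# --- Editor's note: hedged illustration, not part of the original file. ------------
# The lookup table built in `stretch` is standard histogram equalization: each gray
# level r is mapped to round((L - 1) * CDF(r)). A NumPy-only sketch of the same
# idea; `_equalize_sketch` is an illustrative name and assumes an integer image.
def _equalize_sketch(image, levels: int = 256):
    hist, _ = np.histogram(image.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()
    lookup = np.round((levels - 1) * cdf).astype(np.uint8)
    return lookup[image]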
| 268 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_snake_case = logging.get_logger(__name__)
_snake_case = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class a__ :
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ):
"""simple docstring"""
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
_lowercase : Optional[Any] = model
_lowercase : Optional[Any] = kwargs.get("model_save_dir" , lowerCAmelCase_ )
_lowercase : Any = kwargs.get("latest_model_name" , lowerCAmelCase_ )
def __call__( self , **_UpperCamelCase ):
"""simple docstring"""
_lowercase : Any = {k: np.array(lowerCAmelCase_ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase_ , lowerCAmelCase_ )
@staticmethod
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ):
"""simple docstring"""
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
_lowercase : Tuple = "CPUExecutionProvider"
return ort.InferenceSession(lowerCAmelCase_ , providers=[provider] , sess_options=lowerCAmelCase_ )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
"""simple docstring"""
_lowercase : Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_lowercase : Any = self.model_save_dir.joinpath(self.latest_model_name )
_lowercase : Union[str, Any] = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ )
try:
shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_lowercase : List[Any] = self.model_save_dir.joinpath(lowerCAmelCase_ )
if src_path.exists():
_lowercase : Tuple = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ )
try:
shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ )
except shutil.SameFileError:
pass
def _lowerCamelCase ( self , _UpperCamelCase , **_UpperCamelCase , ):
"""simple docstring"""
if os.path.isfile(lowerCAmelCase_ ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def _lowerCamelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : int = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase_ ):
_lowercase : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ )
_lowercase : Dict = Path(lowerCAmelCase_ )
# load model from hub
else:
# download model
_lowercase : Any = hf_hub_download(
repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , )
_lowercase : Union[str, Any] = Path(lowerCAmelCase_ ).parent
_lowercase : Dict = Path(lowerCAmelCase_ ).name
_lowercase : int = OnnxRuntimeModel.load_model(lowerCAmelCase_ , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ )
return cls(model=lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def _lowerCamelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : str = None
if len(str(lowerCAmelCase_ ).split("@" ) ) == 2:
_lowercase : Dict = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
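

# --- Editor's note: hedged usage sketch, not part of the original file. ------------
# The wrapper above ultimately drives an `onnxruntime.InferenceSession` fed with
# NumPy arrays. A minimal direct-onnxruntime sketch of that flow; the model path
# and the float32 input assumption are illustrative.
def _ort_sketch(model_path: str = "model.onnx"):
    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    first_input = session.get_inputs()[0]
    shape = [dim if isinstance(dim, int) else 1 for dim in first_input.shape]
    dummy = np.zeros(shape, dtype=np.float32)
    return session.run(None, {first_input.name: dummy})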
| 250 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Tuple = embeddings_size
UpperCAmelCase_ : Union[str, Any] = hidden_sizes
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : str = scope
UpperCAmelCase_ : str = len(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__magic_name__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : List[Any] = layer_type
UpperCAmelCase_ : int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ):
UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple()
def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
if isinstance(lowerCAmelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ):
UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
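

# --- Editor's note: hedged usage sketch, not part of the original test file. -------
# Mirrors the integration test above with the public `transformers` API; the
# checkpoint name and the fixture image path are illustrative assumptions.
def _tf_regnet_sketch():
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs, training=False).logits
    return int(tf.math.argmax(logits, axis=-1)[0])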
| 268 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """spiece.model"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
_UpperCAmelCase = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class a ( __A ):
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Tuple = ['input_ids', 'attention_mask']
UpperCamelCase : Any = []
def __init__( self : str , lowerCAmelCase : Dict , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : List[Any]="<pad>" , lowerCAmelCase : Tuple="[SEP]" , lowerCAmelCase : int="[MASK]" , lowerCAmelCase : Optional[int]="[CLS]" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : int , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
SCREAMING_SNAKE_CASE_: Tuple =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
SCREAMING_SNAKE_CASE_: List[str] =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
SCREAMING_SNAKE_CASE_: Any =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
SCREAMING_SNAKE_CASE_: int =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_: Any =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
SCREAMING_SNAKE_CASE_: Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE_: Any =vocab_file
SCREAMING_SNAKE_CASE_: Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any ={self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.__dict__.copy()
SCREAMING_SNAKE_CASE_: List[Any] =None
return state
def __setstate__( self : int , lowerCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE_: Tuple ={}
SCREAMING_SNAKE_CASE_: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Any ) -> int:
'''simple docstring'''
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.sp_model.IdToPiece(lowerCAmelCase_ )
return token
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =[]
SCREAMING_SNAKE_CASE_: List[str] =""
SCREAMING_SNAKE_CASE_: int =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
SCREAMING_SNAKE_CASE_: Dict =True
SCREAMING_SNAKE_CASE_: str =[]
else:
current_sub_tokens.append(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : bool = False , lowerCAmelCase : bool = None , lowerCAmelCase : bool = True , **lowerCAmelCase : Optional[Any] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =kwargs.pop("""use_source_tokenizer""" , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: int =self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE_: int =[]
SCREAMING_SNAKE_CASE_: List[Any] =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_: Tuple =[]
sub_texts.append(lowerCAmelCase_ )
else:
current_sub_text.append(lowerCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
SCREAMING_SNAKE_CASE_: List[str] =re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(lowerCAmelCase_ ) )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] ="".join(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Dict =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE_: str =self.clean_up_tokenization(lowerCAmelCase_ )
return clean_text
else:
return text
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE_: Any =os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE_: int =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Tuple =[self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict =[self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE_: Tuple =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
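

# --- Editor's note: hedged usage sketch, not part of the original file. ------------
# Shows the special-token layout produced by the helpers above
# ([CLS] A [SEP] and [CLS] A [SEP] B [SEP]) via the public `transformers` class;
# the checkpoint name matches the vocabulary map earlier in this file.
def _bigbird_tokenizer_sketch():
    from transformers import BigBirdTokenizer

    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids_a = tokenizer.encode("first sentence", add_special_tokens=False)
    ids_b = tokenizer.encode("second sentence", add_special_tokens=False)
    with_special = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    segment_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
    return with_special, segment_ids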
| 173 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase_ = get_logger(__name__)
class UpperCamelCase_ :
__magic_name__ = '''dummy_data'''
__magic_name__ = '''datasets'''
__magic_name__ = False
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : int = dataset_name
UpperCAmelCase_ : Optional[int] = cache_dir
UpperCAmelCase_ : Tuple = use_local_dummy_data
UpperCAmelCase_ : int = config
# download_callbacks take a single url as input
UpperCAmelCase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase_ : Dict = str(lowerCAmelCase_ )
# to be downloaded
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : int = None
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
if self._dummy_file is None:
UpperCAmelCase_ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase_ : Union[str, Any] = cached_path(
lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ )
return os.path.join(lowerCAmelCase_ , self.dummy_file_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self._bucket_url is None:
UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase_ : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase_ : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return path
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return {}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for single_url in single_urls:
download_callback(lowerCAmelCase_ )
else:
UpperCAmelCase_ : Tuple = single_urls
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls]
else:
UpperCAmelCase_ : Optional[int] = single_urls
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) )
UpperCAmelCase_ : int = value
# make sure that values are unique
if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
UpperCAmelCase_ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url )
UpperCAmelCase_ : Union[str, Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(lowerCAmelCase_ )
return dummy_data_list
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
def _iter_archive_members(lowerCAmelCase_ : Dict ):
# this preserves the order of the members inside the ZIP archive
UpperCAmelCase_ : str = Path(self.dummy_file ).parent
UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCAmelCase_ : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : str = [paths]
for path in paths:
if os.path.isfile(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(lowerCAmelCase_ ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCamelCase__ ( __A):
'''simple docstring'''
_A = 4_2
class lowerCamelCase__ ( nn.Module):
'''simple docstring'''
def __init__( self :Optional[int] , a :List[str]=3 , a :int=3 , a :List[str]=("DownEncoderBlock2D",) , a :str=(6_4,) , a :Union[str, Any]=2 , a :Any=3_2 , a :str="silu" , a :Optional[int]=True , ) -> List[Any]:
super().__init__()
__UpperCamelCase : Optional[int] = layers_per_block
__UpperCamelCase : Tuple = torch.nn.Convad(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Optional[Any] = nn.ModuleList([] )
# down
__UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
__UpperCamelCase : Union[str, Any] = output_channel
__UpperCamelCase : Dict = block_out_channels[i]
__UpperCamelCase : Optional[int] = i == len(lowerCAmelCase_ ) - 1
__UpperCamelCase : Optional[int] = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
__UpperCamelCase : Any = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
__UpperCamelCase : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
__UpperCamelCase : List[str] = nn.SiLU()
__UpperCamelCase : Any = 2 * out_channels if double_z else out_channels
__UpperCamelCase : List[Any] = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
__UpperCamelCase : Optional[int] = False
def _lowerCamelCase ( self :Optional[int] , a :int ) -> str:
__UpperCamelCase : Dict = x
__UpperCamelCase : int = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a :Optional[Any] ):
def custom_forward(*a :Optional[Any] ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
__UpperCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
__UpperCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
__UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
__UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
__UpperCamelCase : Optional[Any] = down_block(lowerCAmelCase_ )
# middle
__UpperCamelCase : Tuple = self.mid_block(lowerCAmelCase_ )
# post-process
__UpperCamelCase : List[str] = self.conv_norm_out(lowerCAmelCase_ )
__UpperCamelCase : str = self.conv_act(lowerCAmelCase_ )
__UpperCamelCase : Optional[int] = self.conv_out(lowerCAmelCase_ )
return sample
class lowerCamelCase__ ( nn.Module):
'''simple docstring'''
def __init__( self :str , a :Union[str, Any]=3 , a :Any=3 , a :Optional[int]=("UpDecoderBlock2D",) , a :Dict=(6_4,) , a :Union[str, Any]=2 , a :Tuple=3_2 , a :Dict="silu" , a :Union[str, Any]="group" , ) -> int:
super().__init__()
__UpperCamelCase : Optional[int] = layers_per_block
__UpperCamelCase : List[Any] = nn.Convad(
lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCamelCase : str = None
__UpperCamelCase : Optional[Any] = nn.ModuleList([] )
__UpperCamelCase : List[str] = in_channels if norm_type == "spatial" else None
# mid
__UpperCamelCase : List[str] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# up
__UpperCamelCase : Optional[int] = list(reversed(lowerCAmelCase_ ) )
__UpperCamelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
__UpperCamelCase : Optional[int] = output_channel
__UpperCamelCase : int = reversed_block_out_channels[i]
__UpperCamelCase : str = i == len(lowerCAmelCase_ ) - 1
__UpperCamelCase : int = get_up_block(
lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
self.up_blocks.append(lowerCAmelCase_ )
__UpperCamelCase : str = output_channel
# out
if norm_type == "spatial":
__UpperCamelCase : List[str] = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
else:
__UpperCamelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1E-6 )
__UpperCamelCase : List[Any] = nn.SiLU()
__UpperCamelCase : Optional[Any] = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )
__UpperCamelCase : List[str] = False
def _lowerCamelCase ( self :List[str] , a :str , a :Dict=None ) -> List[Any]:
__UpperCamelCase : Optional[Any] = z
__UpperCamelCase : Any = self.conv_in(lowerCAmelCase_ )
__UpperCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a :Optional[int] ):
def custom_forward(*a :Optional[int] ):
return module(*lowerCAmelCase_ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
__UpperCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
__UpperCamelCase : Optional[Any] = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
__UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
# middle
__UpperCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCamelCase : Any = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
__UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# middle
__UpperCamelCase : Optional[Any] = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCamelCase : Optional[Any] = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
__UpperCamelCase : Tuple = up_block(lowerCAmelCase_ , lowerCAmelCase_ )
# post-process
if latent_embeds is None:
__UpperCamelCase : Optional[int] = self.conv_norm_out(lowerCAmelCase_ )
else:
__UpperCamelCase : Tuple = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCamelCase : Optional[Any] = self.conv_act(lowerCAmelCase_ )
__UpperCamelCase : List[str] = self.conv_out(lowerCAmelCase_ )
return sample
class lowerCamelCase__ ( nn.Module):
'''simple docstring'''
def __init__( self :Optional[Any] , a :Optional[Any] , a :Any , a :List[str] , a :Optional[int]=None , a :List[Any]="random" , a :Union[str, Any]=False , a :str=True ) -> Any:
super().__init__()
__UpperCamelCase : List[str] = n_e
__UpperCamelCase : List[Any] = vq_embed_dim
__UpperCamelCase : List[str] = beta
__UpperCamelCase : Union[str, Any] = legacy
__UpperCamelCase : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__UpperCamelCase : List[str] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
__UpperCamelCase : List[Any] = self.used.shape[0]
__UpperCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__UpperCamelCase : Union[str, Any] = self.re_embed
__UpperCamelCase : Optional[Any] = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__UpperCamelCase : List[str] = n_e
__UpperCamelCase : Optional[Any] = sane_index_shape
def _lowerCamelCase ( self :Tuple , a :List[Any] ) -> Any:
__UpperCamelCase : str = inds.shape
assert len(lowerCAmelCase_ ) > 1
__UpperCamelCase : int = inds.reshape(ishape[0] , -1 )
__UpperCamelCase : Tuple = self.used.to(lowerCAmelCase_ )
__UpperCamelCase : Any = (inds[:, :, None] == used[None, None, ...]).long()
__UpperCamelCase : Optional[Any] = match.argmax(-1 )
__UpperCamelCase : Optional[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
__UpperCamelCase : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__UpperCamelCase : List[str] = self.unknown_index
return new.reshape(lowerCAmelCase_ )
def _lowerCamelCase ( self :Any , a :Tuple ) -> Any:
__UpperCamelCase : str = inds.shape
assert len(lowerCAmelCase_ ) > 1
__UpperCamelCase : Dict = inds.reshape(ishape[0] , -1 )
__UpperCamelCase : str = self.used.to(lowerCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
__UpperCamelCase : Union[str, Any] = 0 # simply set to zero
__UpperCamelCase : Tuple = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
return back.reshape(lowerCAmelCase_ )
def _lowerCamelCase ( self :Dict , a :Union[str, Any] ) -> Optional[Any]:
# reshape z -> (batch, height, width, channel) and flatten
__UpperCamelCase : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
__UpperCamelCase : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__UpperCamelCase : Optional[Any] = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )
__UpperCamelCase : List[Any] = self.embedding(lowerCAmelCase_ ).view(z.shape )
__UpperCamelCase : List[str] = None
__UpperCamelCase : List[Any] = None
# compute loss for embedding
if not self.legacy:
__UpperCamelCase : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__UpperCamelCase : Optional[int] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__UpperCamelCase : str = z + (z_q - z).detach()
# reshape back to match original input shape
__UpperCamelCase : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__UpperCamelCase : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__UpperCamelCase : str = self.remap_to_used(lowerCAmelCase_ )
__UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__UpperCamelCase : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowerCamelCase ( self :List[Any] , a :Dict , a :Union[str, Any] ) -> Optional[int]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__UpperCamelCase : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
__UpperCamelCase : str = self.unmap_to_all(lowerCAmelCase_ )
__UpperCamelCase : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__UpperCamelCase : Any = self.embedding(lowerCAmelCase_ )
if shape is not None:
__UpperCamelCase : List[Any] = z_q.view(lowerCAmelCase_ )
# reshape back to match original input shape
__UpperCamelCase : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class lowerCamelCase__ ( __A):
'''simple docstring'''
def __init__( self :Optional[Any] , a :str , a :Optional[Any]=False ) -> List[Any]:
__UpperCamelCase : int = parameters
__UpperCamelCase : Any = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
__UpperCamelCase : Dict = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
__UpperCamelCase : List[Any] = deterministic
__UpperCamelCase : Tuple = torch.exp(0.5 * self.logvar )
__UpperCamelCase : List[str] = torch.exp(self.logvar )
if self.deterministic:
__UpperCamelCase : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowerCamelCase ( self :str , a :Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
__UpperCamelCase : int = randn_tensor(
self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
__UpperCamelCase : Optional[int] = self.mean + self.std * sample
return x
def _lowerCamelCase ( self :List[Any] , a :Optional[Any]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowerCamelCase ( self :Any , a :List[str] , a :str=[1, 2, 3] ) -> Optional[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
__UpperCamelCase : str = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )
def _lowerCamelCase ( self :Optional[int] ) -> int:
return self.mean | 232 |
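# The vector quantizer above snaps each latent onto its nearest codebook entry and passes gradients
# straight through. A stripped-down sketch of that core step; the sizes and the commitment weight
# `beta` are illustrative, and the loss mirrors the `legacy` branch of the class above.
import torch
import torch.nn as nn
import torch.nn.functional as F

codebook = nn.Embedding(512, 64)             # 512 codes of dimension 64
z = torch.randn(8, 64, requires_grad=True)   # 8 encoder outputs
beta = 0.25

distances = torch.cdist(z, codebook.weight)  # (8, 512) pairwise distances
indices = distances.argmin(dim=1)            # index of the nearest code per latent
z_q = codebook(indices)                      # quantized latents, shape (8, 64)

# commitment term (gradients reach the encoder) plus beta-weighted codebook term
loss = F.mse_loss(z_q.detach(), z) + beta * F.mse_loss(z_q, z.detach())

# straight-through estimator: the forward pass uses z_q, the backward pass copies gradients onto z
z_q = z + (z_q - z).detach()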
"""simple docstring"""
lowerCamelCase_ = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def snake_case ( A__ ):
UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Tuple = 0
while place < len(A__ ):
if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def snake_case ( A__ ):
UpperCAmelCase_ : Union[str, Any] = []
for arabic, roman in ROMAN:
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ )
result.append(roman * factor )
if number == 0:
break
return "".join(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 0 |
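# Both functions above are driven by the same symbol table; a de-obfuscated sketch of the greedy
# integer-to-roman direction with two spot checks (the function name below is mine):
ROMAN_TABLE = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"), (90, "XC"),
               (50, "L"), (40, "XL"), (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]

def int_to_roman(number):
    result = []
    for value, symbol in ROMAN_TABLE:
        factor, number = divmod(number, value)  # greedily take as many of the largest symbol as fit
        result.append(symbol * factor)
        if number == 0:
            break
    return "".join(result)

print(int_to_roman(1994))  # MCMXCIV
print(int_to_roman(3549))  # MMMDXLIX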
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 119 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ (__A ):
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Optional[Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : int = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : List[str] = relative_attention
UpperCAmelCase_ : List[Any] = position_biased_input
UpperCAmelCase_ : Dict = pos_att_type
UpperCAmelCase_ : Optional[Any] = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Tuple = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.get_config()
UpperCAmelCase_ : int = 300
return config
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str:
UpperCAmelCase_ : Optional[int] = self.num_labels
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]:
UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Any = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Tuple = config_and_inputs
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : int = DebertaModelTester(self )
UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ (unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 268 | 0 |
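# The tester above builds a tiny random DebertaConfig and only checks output shapes. A minimal
# forward-pass sketch in the same spirit; the small sizes and random inputs are illustrative,
# not the values the test suite uses.
import torch
from transformers import DebertaConfig, DebertaModel

config = DebertaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
                       num_attention_heads=4, intermediate_size=37)
model = DebertaModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))  # batch of 2 sequences of length 7
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 7, 32])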
'''simple docstring'''
from __future__ import annotations
def __magic_name__( lowerCamelCase, lowerCamelCase):
if b == 0:
return (1, 0)
    (__lowerCAmelCase) , (__lowerCAmelCase) = extended_euclid(A__, a % b)
__lowerCAmelCase = a // b
return (y, x - k * y)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
    (__lowerCAmelCase) , (__lowerCAmelCase) = extended_euclid(A__, A__)
__lowerCAmelCase = na * na
__lowerCAmelCase = ra * x * na + ra * y * na
return (n % m + m) % m
def __magic_name__( lowerCamelCase, lowerCamelCase):
    (__lowerCAmelCase) , (__lowerCAmelCase) = extended_euclid(A__, A__)
if b < 0:
__lowerCAmelCase = (b % n + n) % n
return b
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
    __lowerCAmelCase , __lowerCAmelCase = invert_modulo(A__, A__), invert_modulo(A__, A__)
__lowerCAmelCase = na * na
__lowerCAmelCase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 174 |
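# The identifier mangling above hides the tuple unpacking, so here is a clean sketch of the same
# two-congruence construction via the extended Euclidean algorithm (the function names are mine):
def extended_euclid(a, b):
    # returns (x, y) with a * x + b * y == gcd(a, b)
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)

def chinese_remainder_theorem(n1, r1, n2, r2):
    # smallest non-negative n with n % n1 == r1 and n % n2 == r2, assuming n1 and n2 are coprime
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m

print(chinese_remainder_theorem(5, 1, 7, 3))  # 31, since 31 % 5 == 1 and 31 % 7 == 3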
"""simple docstring"""
import os
def snake_case ( ):
with open(os.path.dirname(A__ ) + "/grid.txt" ) as f:
UpperCAmelCase_ : Any = [] # noqa: E741
for _ in range(20 ):
l.append([int(A__ ) for x in f.readline().split()] )
UpperCAmelCase_ : Any = 0
# right
for i in range(20 ):
for j in range(17 ):
UpperCAmelCase_ : Union[str, Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
UpperCAmelCase_ : Any = temp
# down
for i in range(17 ):
for j in range(20 ):
UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
UpperCAmelCase_ : Tuple = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
UpperCAmelCase_ : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
UpperCAmelCase_ : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
UpperCAmelCase_ : List[str] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 268 | 0 |
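# The four scans above (right, down and the two diagonals) can be folded into one loop over
# direction vectors; a compact sketch assuming the same 20x20 grid has been read into `grid`:
def largest_product_of_four(grid):
    n = len(grid)
    directions = [(0, 1), (1, 0), (1, 1), (1, -1)]  # right, down, diagonal, anti-diagonal
    best = 0
    for i in range(n):
        for j in range(n):
            for di, dj in directions:
                if 0 <= i + 3 * di < n and 0 <= j + 3 * dj < n:
                    product = 1
                    for step in range(4):
                        product *= grid[i + step * di][j + step * dj]
                    best = max(best, product)
    return best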
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowercase__ : Any = 2_9979_2458
# Symbols
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = symbols("ct x y z")
def lowerCamelCase__ ( _A ):
'''simple docstring'''
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return 1 / sqrt(1 - beta(A__ ) ** 2 )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return np.array(
[
[gamma(A__ ), -gamma(A__ ) * beta(A__ ), 0, 0],
[-gamma(A__ ) * beta(A__ ), gamma(A__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCamelCase__ ( _A , _A = None ):
'''simple docstring'''
if event is None:
snake_case_ = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(A__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowercase__ : int = transform(2997_9245)
print("Example of four vector: ")
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
lowercase__ : Any = {ct: c, x: 1, y: 1, z: 1}
lowercase__ : Union[str, Any] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 187 |
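# A quick numeric use of the transformation above for a boost of v = 0.9c; the event coordinates
# are arbitrary illustrative numbers, not values taken from this module.
from math import sqrt
import numpy as np

c = 299_792_458                      # speed of light (m/s)
beta = 0.9                           # v / c for a 0.9c boost
gamma = 1 / sqrt(1 - beta ** 2)      # ~2.294

boost = np.array([
    [gamma, -gamma * beta, 0, 0],
    [-gamma * beta, gamma, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
])
event = np.array([c * 1.0, 1.0, 1.0, 1.0])  # (ct, x, y, z) for t = 1 s and x = y = z = 1 m
print(boost @ event)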
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def snake_case ( A__ ):
UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 )
if "base" in model_name:
UpperCAmelCase_ : Any = 6
UpperCAmelCase_ : Optional[Any] = 1_28
UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : int = 1_92
UpperCAmelCase_ : List[Any] = (2, 2, 18, 2)
UpperCAmelCase_ : int = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
UpperCAmelCase_ : str = window_size
UpperCAmelCase_ : Any = embed_dim
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Any = num_heads
return config
def snake_case ( A__ ):
if "encoder.mask_token" in name:
UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" )
if "attn.proj" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" )
if "norm1" in name:
UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" )
if name == "encoder.norm.weight":
UpperCAmelCase_ : List[str] = "layernorm.weight"
if name == "encoder.norm.bias":
UpperCAmelCase_ : int = "layernorm.bias"
if "decoder" in name:
pass
else:
UpperCAmelCase_ : Any = "swin." + name
return name
def snake_case ( A__ ,A__ ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase_ : Optional[int] = key.split("." )
UpperCAmelCase_ : str = int(key_split[2] )
UpperCAmelCase_ : Union[str, Any] = int(key_split[4] )
UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ : List[Any] = val[:dim, :]
UpperCAmelCase_ : str = val[
dim : dim * 2, :
]
UpperCAmelCase_ : str = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[
:dim
]
UpperCAmelCase_ : str = val[
dim : dim * 2
]
UpperCAmelCase_ : Optional[Any] = val[
-dim:
]
else:
UpperCAmelCase_ : Tuple = val
return orig_state_dict
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"]
UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ )
UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ )
model.eval()
UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ )
model.load_state_dict(A__ )
UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} )
UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw )
UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" )
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**A__ ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 268 | 0 |
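# Besides renaming keys, convert_state_dict above splits each fused qkv projection into separate
# query/key/value tensors. The slicing it relies on, shown on a toy tensor (sizes are illustrative):
import torch

dim = 8                                  # per-block hidden size
qkv_weight = torch.randn(3 * dim, dim)   # fused query/key/value projection weight

query = qkv_weight[:dim, :]              # first third  -> query
key = qkv_weight[dim : dim * 2, :]       # middle third -> key
value = qkv_weight[-dim:, :]             # last third   -> value
print(query.shape, key.shape, value.shape)  # three (8, 8) tensors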
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Any:
if not isinstance(A__ , A__ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = str(A__ )
while len(A__ ) != 1:
SCREAMING_SNAKE_CASE_ = [int(A__ ) for i in num_string]
SCREAMING_SNAKE_CASE_ = 1
for i in range(0 , len(A__ ) ):
total *= numbers[i]
SCREAMING_SNAKE_CASE_ = str(A__ )
steps += 1
return steps
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> int:
if not isinstance(A__ , A__ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = str(A__ )
while len(A__ ) != 1:
SCREAMING_SNAKE_CASE_ = [int(A__ ) for i in num_string]
SCREAMING_SNAKE_CASE_ = 0
for i in range(0 , len(A__ ) ):
total += numbers[i]
SCREAMING_SNAKE_CASE_ = str(A__ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod() | 225 |
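# Worked traces for the two notions above (the loop below re-derives the multiplicative case):
#   multiplicative persistence of 39: 39 -> 3 * 9 = 27 -> 2 * 7 = 14 -> 1 * 4 = 4, i.e. 3 steps
#   additive persistence of 199: 199 -> 1 + 9 + 9 = 19 -> 1 + 9 = 10 -> 1 + 0 = 1, i.e. 3 steps
num, steps = 39, 0
while num >= 10:
    product = 1
    for digit in str(num):
        product *= int(digit)
    num, steps = product, steps + 1
print(steps)  # 3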
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''rwkv'''
__magic_name__ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : List[str] = context_length
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[Any] = rescale_every
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[str] = bos_token_id
UpperCAmelCase_ : Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
| 268 | 0 |
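# The config above mostly stores sizes; the only derived values are attention_hidden_size, which
# falls back to hidden_size, and intermediate_size, which falls back to 4 * hidden_size. A minimal
# sketch of that defaulting pattern (the class name here is illustrative, not the library's):
class MiniRwkvConfig:
    def __init__(self, vocab_size=50_277, context_length=1024, hidden_size=4096,
                 num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size

cfg = MiniRwkvConfig(hidden_size=256)
print(cfg.attention_hidden_size, cfg.intermediate_size)  # 256 1024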
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowercase__ = datasets.logging.get_logger(__name__)
lowercase__ = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
lowercase__ = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
lowercase__ = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="dummy_doc" ) ->List[str]:
a__: List[str] = {doc: key_lines}
a__: str = {doc: sys_lines}
a__: int = {}
a__: List[str] = 0
a__: Union[str, Any] = 0
a__: str = 0
a__: int = 0
a__: Optional[Any] = 0
a__: Union[str, Any] = 0
a__: Optional[Any] = reader.get_doc_mentions(A__ , key_doc_lines[doc] , A__ )
key_singletons_num += singletons_num
if NP_only or min_span:
a__: Tuple = reader.set_annotated_parse_trees(A__ , key_doc_lines[doc] , A__ , A__ )
a__: List[Any] = reader.get_doc_mentions(A__ , sys_doc_lines[doc] , A__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
a__: Any = reader.set_annotated_parse_trees(A__ , key_doc_lines[doc] , A__ , A__ )
if remove_nested:
a__: int = reader.remove_nested_coref_mentions(A__ , A__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
a__: Optional[int] = reader.remove_nested_coref_mentions(A__ , A__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
a__: Tuple = reader.get_mention_assignments(A__ , A__ )
a__: int = reader.get_mention_assignments(A__ , A__ )
a__: int = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'Number of resulting singleton clusters in the key '
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'files, respectively' )
return doc_coref_infos
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: int = get_coref_infos(A__ , A__ , A__ , A__ , A__ , A__ )
a__: Optional[int] = {}
a__: Tuple = 0
a__: Dict = 0
for name, metric in metrics:
a__: Union[str, Any] = evaluator.evaluate_documents(A__ , A__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , F'Recall: {recall * 100:.2f}' , F' Precision: {precision * 100:.2f}' , F' F1: {fa * 100:.2f}' , )
if conll_subparts_num == 3:
a__: List[Any] = (conll / 3) * 100
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'conll_score': conll} )
return output_scores
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
a__: int = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
a__: Any = line.split()[5]
if not parse_col == "-":
a__: List[str] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Sequence(datasets.Value('string')),
}) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase=True , lowercase=False , lowercase=False , lowercase=False) -> List[str]:
'''simple docstring'''
a__: Tuple = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
a__: List[Any] = util.check_gold_parse_annotation(lowerCAmelCase_)
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
a__: List[str] = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 290 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 268 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( __A ):
"""simple docstring"""
__a = ["""image_processor""", """tokenizer"""]
__a = """FlavaImageProcessor"""
__a = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase_ , )
__UpperCAmelCase : Any = kwargs.pop("""feature_extractor""" )
__UpperCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : List[str] = self.image_processor
def __call__( self : Dict , UpperCamelCase : Optional[ImageInput] = None , UpperCamelCase : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = False , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__UpperCAmelCase : Dict = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if images is not None:
__UpperCAmelCase : Dict = self.image_processor(
lowerCAmelCase_ , return_image_mask=lowerCAmelCase_ , return_codebook_pixels=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def lowerCamelCase__ ( self : str , *UpperCamelCase : Dict , **UpperCamelCase : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase__ ( self : str , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase_ , )
return self.image_processor_class
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase_ , )
return self.image_processor
| 115 |
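# The processor above simply routes text to the tokenizer and images to the image processor and
# merges the two encodings. Typical usage, assuming the public facebook/flava-full checkpoint
# (the checkpoint name and prompt are illustrative, not taken from this file):
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_values, token_type_ids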
"""simple docstring"""
from __future__ import annotations
class UpperCamelCase_ :
def __init__( self : Any , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : Any = data
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def snake_case ( A__ ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def snake_case ( A__ ):
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def snake_case ( A__ ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def snake_case ( ): # Main function for testing.
UpperCAmelCase_ : List[str] = Node(1 )
UpperCAmelCase_ : Any = Node(2 )
UpperCAmelCase_ : Optional[Any] = Node(3 )
UpperCAmelCase_ : Union[str, Any] = Node(4 )
UpperCAmelCase_ : int = Node(5 )
UpperCAmelCase_ : Optional[int] = Node(6 )
UpperCAmelCase_ : Any = Node(7 )
UpperCAmelCase_ : List[str] = Node(8 )
UpperCAmelCase_ : List[Any] = Node(9 )
print(is_full_binary_tree(A__ ) )
print(depth_of_tree(A__ ) )
print("Tree is: " )
display(A__ )
if __name__ == "__main__":
main()
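# A minimal, self-contained illustration of the "full binary tree" property checked above
# (the class and helper names below are illustrative, not the snippet's own identifiers):
# a tree is full when every node has either zero or two children.
class _DemoNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def _is_full(node):
    if node is None:
        return True
    if (node.left is None) != (node.right is None):  # exactly one child -> not full
        return False
    return _is_full(node.left) and _is_full(node.right)

assert _is_full(_DemoNode(1, _DemoNode(2), _DemoNode(3)))  # both children present
assert not _is_full(_DemoNode(1, _DemoNode(2)))            # only one child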
| 268 | 0 |
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ = 1E-12 , snake_case__ = 1_0_0 , ) -> Dict:
assert np.shape(A__ )[0] == np.shape(A__ )[1]
# Ensure proper dimensionality.
assert np.shape(A__ )[0] == np.shape(A__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(A__ ) == np.iscomplexobj(A__ )
lowerCAmelCase = np.iscomplexobj(A__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(A__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have only small changes from one iteration to the next.
lowerCAmelCase = False
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 1E12
while not convergence:
        # Multiply the matrix by the vector.
lowerCAmelCase = np.dot(A__ , A__ )
# Normalize the resulting output vector.
lowerCAmelCase = w / np.linalg.norm(A__ )
        # Find the Rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowerCAmelCase = vector.conj().T if is_complex else vector.T
lowerCAmelCase = np.dot(A__ , np.dot(A__ , A__ ) )
# Check convergence.
lowerCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowerCAmelCase = True
lowerCAmelCase = lambda_
if is_complex:
lowerCAmelCase = np.real(lambda_ )
return lambda_, vector
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
lowerCAmelCase = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
lowerCAmelCase = np.array([4_1, 4, 2_0] )
lowerCAmelCase = real_input_matrix.astype(np.complexaaa )
lowerCAmelCase = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
lowerCAmelCase = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
lowerCAmelCase = real_input_matrix
lowerCAmelCase = real_vector
elif problem_type == "complex":
lowerCAmelCase = complex_input_matrix
lowerCAmelCase = complex_vector
# Our implementation.
lowerCAmelCase = power_iteration(A__ , A__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
lowerCAmelCase = np.linalg.eigh(A__ )
# Last eigenvalue is the maximum one.
lowerCAmelCase = eigen_values[-1]
        # Last column in this matrix is the eigenvector corresponding to the largest eigenvalue.
lowerCAmelCase = eigen_vectors[:, -1]
        # Check that our implementation and numpy give close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take element-wise absolute values of each eigenvector,
        # as eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(A__ ) - np.abs(A__ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
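# A compact, self-contained sketch of the same power-iteration idea on a 2x2 symmetric
# matrix (the matrix and names below are illustrative and not taken from the code above).
_A = np.array([[2.0, 1.0], [1.0, 2.0]])   # eigenvalues are 3 and 1
_v = np.array([1.0, 0.0])                 # any start vector with a component along [1, 1]
for _ in range(50):
    _w = _A @ _v                          # multiply, then re-normalise
    _v = _w / np.linalg.norm(_w)
_estimate = _v @ _A @ _v                  # Rayleigh quotient of the normalised vector
assert abs(_estimate - 3.0) < 1e-6        # dominant eigenvalue recovered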
| 338 |
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
if index == number_of_items:
return 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack(
A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 )
return max(A__ ,A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
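# An equivalent, self-contained sketch of the same recursion with readable names
# (illustrative only): at each index either skip the item, or take it if it still fits.
def _knapsack(weights, values, capacity, index=0):
    if index == len(weights):
        return 0
    skip = _knapsack(weights, values, capacity, index + 1)
    take = 0
    if weights[index] <= capacity:
        take = values[index] + _knapsack(weights, values, capacity - weights[index], index + 1)
    return max(skip, take)

assert _knapsack([1, 3, 4, 5], [1, 4, 5, 7], capacity=7) == 9  # take the items weighing 3 and 4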
| 268 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase (__A ):
__snake_case : str = ["image_processor", "tokenizer"]
__snake_case : Optional[Any] = "ViltImageProcessor"
__snake_case : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self: List[str] , UpperCAmelCase_: int=None , UpperCAmelCase_: int=None , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_: bool = True , UpperCAmelCase_: Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_: Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_: Optional[int] = None , UpperCAmelCase_: int = 0 , UpperCAmelCase_: Optional[int] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: bool = False , UpperCAmelCase_: bool = False , UpperCAmelCase_: bool = False , UpperCAmelCase_: bool = False , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[Union[str, TensorType]] = None , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
# add pixel_values + pixel_mask
_SCREAMING_SNAKE_CASE = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
encoding.update(lowerCAmelCase_ )
return encoding
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Union[str, Any] , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Union[str, Any] , **UpperCAmelCase_: Dict ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase_ , )
return self.image_processor_class
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase_ , )
return self.image_processor
| 306 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (__A ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = proj_size
UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ )
UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output
UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase_ (nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Optional[Any] = config.hidden_size
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ )
] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str:
for block in self.blocks:
UpperCAmelCase_ : int = block(lowerCAmelCase_ )
return hidden_states
| 268 | 0 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case ) -> int:
return len(set(A__ ) ) == len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
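# Illustration of the set-length check above (values are arbitrary): a list has all-unique
# elements exactly when converting it to a set does not shrink it.
assert len(set([1, 2, 3])) == len([1, 2, 3])   # no duplicates
assert len(set([1, 2, 2])) != len([1, 2, 2])   # duplicate detected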
| 250 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCamelCase_ (__A ):
def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]:
UpperCAmelCase_ : str = {}
if top_k is not None:
UpperCAmelCase_ : List[str] = top_k
return {}, {}, postprocess_params
def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any:
UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ )
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : int = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ )
elif self.framework == "tf":
UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCAmelCase_ : int = scores.tolist()
UpperCAmelCase_ : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
| 268 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class a ( __A ):
def __init__( self : List[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : List[str]=None , lowerCAmelCase : int=None , lowerCAmelCase : str=None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any ={}
SCREAMING_SNAKE_CASE_: List[Any] ={}
if prompt is not None:
SCREAMING_SNAKE_CASE_: List[Any] =prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE_: str =generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE_: List[Any] ={}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Tuple , lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase : int ) -> List[Any]:
'''simple docstring'''
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_image(lowerCAmelCase_ )
if prompt is not None:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
SCREAMING_SNAKE_CASE_: List[str] =self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE_: Optional[int] =self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE_: Any =self.tokenizer(text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids
SCREAMING_SNAKE_CASE_: Optional[int] =[self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(lowerCAmelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.image_processor(images=lowerCAmelCase_ , header_text=lowerCAmelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE_: Optional[int] =self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE_: str =self.tokenizer(lowerCAmelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
SCREAMING_SNAKE_CASE_: Dict =self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE_: Optional[Any] =None
return model_inputs
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str]=None ) -> Dict:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCAmelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
SCREAMING_SNAKE_CASE_: str =None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE_: Any ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE_: Optional[Any] =model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE_: List[Any] =self.model.generate(lowerCAmelCase_ , **lowerCAmelCase_ , **lowerCAmelCase_ )
return model_outputs
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =[]
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE_: int ={
"generated_text": self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , )
}
records.append(lowerCAmelCase_ )
return records
| 173 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''detr'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : str = use_timm_backbone
UpperCAmelCase_ : Optional[Any] = backbone_config
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Dict = num_queries
UpperCAmelCase_ : str = d_model
UpperCAmelCase_ : Any = encoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Optional[int] = encoder_attention_heads
UpperCAmelCase_ : List[str] = decoder_ffn_dim
UpperCAmelCase_ : Tuple = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[Any] = dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : List[str] = activation_function
UpperCAmelCase_ : Optional[int] = init_std
UpperCAmelCase_ : Union[str, Any] = init_xavier_std
UpperCAmelCase_ : List[str] = encoder_layerdrop
UpperCAmelCase_ : Tuple = decoder_layerdrop
UpperCAmelCase_ : str = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : Optional[int] = position_embedding_type
UpperCAmelCase_ : List[str] = backbone
UpperCAmelCase_ : int = use_pretrained_backbone
UpperCAmelCase_ : Any = dilation
# Hungarian matcher
UpperCAmelCase_ : str = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : List[str] = mask_loss_coefficient
UpperCAmelCase_ : Dict = dice_loss_coefficient
UpperCAmelCase_ : Any = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self.d_model
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]:
return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]:
UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return 12
| 268 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can converge faster but uses more memory.
lowercase : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : List[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Any) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = len([g for position, g in enumerate(A__) if g == main_target[position]])
return (item, float(A__))
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]) -> int:
'''simple docstring'''
__UpperCamelCase : Tuple = random.randint(0 , len(A__) - 1)
__UpperCamelCase : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
__UpperCamelCase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Dict) -> Tuple:
'''simple docstring'''
__UpperCamelCase : List[str] = list(A__)
if random.uniform(0 , 1) < MUTATION_PROBABILITY:
__UpperCamelCase : Tuple = random.choice(A__)
return "".join(A__)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Tuple , ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = []
# Generate more children proportionally to the fitness score.
__UpperCamelCase : Union[str, Any] = int(parent_a[1] * 100) + 1
__UpperCamelCase : Tuple = 10 if child_n >= 10 else child_n
for _ in range(A__):
__UpperCamelCase : Tuple = population_score[random.randint(0 , A__)][0]
__UpperCamelCase : List[str] = crossover(parent_a[0] , A__)
# Append new string to the population list.
pop.append(mutate(A__ , A__))
pop.append(mutate(A__ , A__))
return pop
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str = True) -> Union[str, Any]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
__UpperCamelCase : List[str] = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(A__)
    # Verify that the target contains no genes besides the ones in the genes list.
__UpperCamelCase : List[Any] = sorted({c for c in target if c not in genes})
if not_in_genes_list:
__UpperCamelCase : str = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(A__)
# Generate random starting population.
__UpperCamelCase : Optional[int] = []
for _ in range(A__):
population.append("".join([random.choice(A__) for i in range(len(A__))]))
    # Just some logs to know what the algorithm is doing.
__UpperCamelCase : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A__)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__UpperCamelCase : Union[str, Any] = [evaluate(A__ , A__) for item in population]
# Check if there is a matching evolution.
__UpperCamelCase : Dict = sorted(A__ , key=lambda _lowerCamelCase: x[1] , reverse=A__)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}')
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
__UpperCamelCase : Dict = population[: int(N_POPULATION / 3)]
population.clear()
population.extend(A__)
# Normalize population score to be between 0 and 1.
__UpperCamelCase : Optional[Any] = [
(item, score / len(A__)) for item, score in population_score
]
# This is selection
for i in range(A__):
population.extend(select(population_score[int(A__)] , A__ , A__))
        # Check if the population has already reached its maximum size and, if so,
        # break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(A__) > N_POPULATION:
break
if __name__ == "__main__":
lowercase : Dict = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
lowercase : List[Any] = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
lowercase , lowercase , lowercase : Any = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 232 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (__A , unittest.TestCase ):
__magic_name__ = BertTokenizer
__magic_name__ = BertTokenizerFast
__magic_name__ = True
__magic_name__ = True
__magic_name__ = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running"
UpperCAmelCase_ : Any = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = BasicTokenizer()
UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't."
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ : Tuple = {}
for i, token in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = i
UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
UpperCAmelCase_ : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ["的", "人", "有"]
UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ : Tuple = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
__UpperCAmelCase = namedtuple('''covid_data''', '''cases deaths recovered''')
def UpperCamelCase ( snake_case__ : int = "https://www.worldometers.info/coronavirus/" ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(A__ ).content ).xpath(A__ ) )
__UpperCAmelCase = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
| 119 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''t5'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int:
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[Any] = d_model
UpperCAmelCase_ : str = d_kv
UpperCAmelCase_ : Any = d_ff
UpperCAmelCase_ : int = num_layers
UpperCAmelCase_ : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Optional[Any] = num_heads
UpperCAmelCase_ : Any = relative_attention_num_buckets
UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance
UpperCAmelCase_ : Optional[Any] = dropout_rate
UpperCAmelCase_ : Tuple = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_factor
UpperCAmelCase_ : int = feed_forward_proj
UpperCAmelCase_ : str = use_cache
UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" )
UpperCAmelCase_ : List[Any] = act_info[-1]
UpperCAmelCase_ : Optional[int] = act_info[0] == "gated"
if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : int = "gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
class UpperCamelCase_ (__A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : Any = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence"
UpperCAmelCase_ : Union[str, Any] = {0: "batch"}
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" )
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return 13
| 268 | 0 |
'''simple docstring'''
def __magic_name__( lowerCamelCase, lowerCamelCase):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __magic_name__( lowerCamelCase, lowerCamelCase=0):
return sorted(A__, key=lambda lowerCamelCase: x[column])
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=float('''inf''')):
for i in range(points_counts - 1):
for j in range(i + 1, A__):
__lowerCAmelCase = euclidean_distance_sqr(points[i], points[j])
if current_dis < min_dis:
__lowerCAmelCase = current_dis
return min_dis
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=float('''inf''')):
for i in range(min(6, points_counts - 1), A__):
for j in range(max(0, i - 6), A__):
__lowerCAmelCase = euclidean_distance_sqr(points[i], points[j])
if current_dis < min_dis:
__lowerCAmelCase = current_dis
return min_dis
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
# base case
if points_counts <= 3:
return dis_between_closest_pair(A__, A__)
# recursion
__lowerCAmelCase = points_counts // 2
__lowerCAmelCase = closest_pair_of_points_sqr(
A__, points_sorted_on_y[:mid], A__)
__lowerCAmelCase = closest_pair_of_points_sqr(
A__, points_sorted_on_y[mid:], points_counts - mid)
__lowerCAmelCase = min(A__, A__)
__lowerCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
cross_strip.append(A__)
__lowerCAmelCase = dis_between_closest_in_strip(
A__, len(A__), A__)
return min(A__, A__)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = column_based_sort(A__, column=0)
__lowerCAmelCase = column_based_sort(A__, column=1)
return (
closest_pair_of_points_sqr(
A__, A__, A__)
) ** 0.5
if __name__ == "__main__":
_UpperCAmelCase : Dict = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 174 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase_ :
    # settable values
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None # sigma(t_i)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]:
return cls()
@dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
class UpperCamelCase_ (__A , __A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return True
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return KarrasVeSchedulerState.create()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState:
UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
UpperCAmelCase_ : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 )
UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma
UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : str = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
raise NotImplementedError()
| 268 | 0 |
import argparse
import os
import re
lowercase__ : int = "src/diffusers"
# Pattern that looks at the indentation in a line.
lowercase__ : int = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : Union[str, Any] = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : Optional[int] = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : Any = re.compile(R"\[([^\]]+)\]")
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def lowerCamelCase__ ( _A , _A="" , _A=None , _A=None ):
'''simple docstring'''
snake_case_ = 0
snake_case_ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
snake_case_ = ["\n".join(lines[:index] )]
else:
snake_case_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case_ = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(A__ ) )
if index < len(A__ ) - 1:
snake_case_ = [lines[index + 1]]
index += 1
else:
snake_case_ = []
else:
blocks.append("\n".join(A__ ) )
snake_case_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append("\n".join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def lowerCamelCase__ ( _A ):
'''simple docstring'''
def _inner(_A ):
return key(A__ ).lower().replace("_" , "" )
return _inner
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
def noop(_A ):
return x
if key is None:
snake_case_ = noop
# Constants are all uppercase, they go first.
snake_case_ = [obj for obj in objects if key(A__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
snake_case_ = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()]
# Functions begin with a lowercase, they go last.
snake_case_ = [obj for obj in objects if not key(A__ )[0].isupper()]
snake_case_ = ignore_underscore(A__ )
return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ )
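# A tiny illustration of the ordering rule implemented above (constants first, then
# classes, then functions); the object names are arbitrary examples.
_objs = ["load_model", "ModelConfig", "DEFAULT_SEED", "save_model", "Trainer"]
_consts = [o for o in _objs if o.isupper()]
_classes = [o for o in _objs if o[0].isupper() and not o.isupper()]
_funcs = [o for o in _objs if not o[0].isupper()]
assert _consts + _classes + _funcs == ["DEFAULT_SEED", "ModelConfig", "Trainer", "load_model", "save_model"]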
def lowerCamelCase__ ( _A ):
'''simple docstring'''
def _replace(_A ):
snake_case_ = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
snake_case_ = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case_ = keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
snake_case_ = import_statement.split("\n" )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
snake_case_ = 2 if lines[1].strip() == "[" else 1
snake_case_ = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
snake_case_ = sort_objects(A__ , key=lambda _A : x[1] )
snake_case_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
snake_case_ = _re_bracket_content.sub(_replace , lines[1] )
else:
snake_case_ = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
snake_case_ = keys[:-1]
snake_case_ = get_indent(lines[1] ) + ", ".join([f"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
snake_case_ = _re_bracket_content.sub(_replace , A__ )
return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of one __init__.py; return True if the file would change."""
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under the source tree."""
    failures = []
    # PATH_TO_TRANSFORMERS (the root of the source tree) is expected to be defined near the top of this script.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
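# Illustrative example (not part of the original script): with `sort_objects` as defined
# above, constants come first, then classes, then functions, each group sorted
# alphabetically while ignoring case and underscores.
# >>> sort_objects(["load_tool", "CONFIG_NAME", "AutoModel", "WEIGHTS_NAME", "add_end_docstrings"])
# ['CONFIG_NAME', 'WEIGHTS_NAME', 'AutoModel', 'add_end_docstrings', 'load_tool']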
| 187 |
"""simple docstring"""
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
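# Worked check (illustrative, not in the original file): the composite trapezoidal rule
#     int_a^b f(x) dx  ~=  h * [f(a)/2 + f(a + h) + f(a + 2h) + ... + f(b - h) + f(b)/2]
# applied to f(x) = x**2 on [0, 1] with 10 steps gives roughly 0.335, close to the
# exact value 1/3; the error shrinks as the number of steps grows.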
| 268 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of an XGLM model."""
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        ) | 225 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
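# Illustrative example (not part of the original file):
# >>> max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
# (3, 6, 6)
# The maximum-sum slice is arr[3:7] = [4, -1, 2, 1], whose elements sum to 6.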
def max_cross_sum(arr, low, mid, high):
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 268 | 0 |
"""simple docstring"""
def binary_insertion_sort(collection):
    """Sort `collection` in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
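# Illustrative check (not part of the original file):
# >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
# [1, 2, 3, 4, 5, 6]
# The binary search cuts comparisons to O(n log n), but shifting elements still
# makes the worst-case running time O(n^2).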
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 290 |
"""simple docstring"""
from __future__ import annotations
import time
lowerCamelCase_ = list[tuple[int, int]]
lowerCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase_ :
def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict:
UpperCAmelCase_ : Any = pos_x
UpperCAmelCase_ : str = pos_y
UpperCAmelCase_ : int = (pos_y, pos_x)
UpperCAmelCase_ : int = goal_x
UpperCAmelCase_ : Tuple = goal_y
UpperCAmelCase_ : Union[str, Any] = parent
class UpperCamelCase_ :
def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple:
UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ )
UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = [self.start]
UpperCAmelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None:
while self.node_queue:
UpperCAmelCase_ : str = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCAmelCase_ : Optional[Any] = True
return self.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ )
for node in successors:
self.node_queue.append(lowerCAmelCase_ )
if not self.reached:
return [self.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]:
UpperCAmelCase_ : List[str] = []
for action in delta:
UpperCAmelCase_ : List[Any] = parent.pos_x + action[1]
UpperCAmelCase_ : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) )
return successors
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path:
UpperCAmelCase_ : Union[str, Any] = node
UpperCAmelCase_ : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Tuple = current_node.parent
path.reverse()
return path
class UpperCamelCase_ :
def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 )
UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCAmelCase_ : str = True
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = current_bwd_node
UpperCAmelCase_ : List[str] = current_fwd_node
UpperCAmelCase_ : Tuple = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCAmelCase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path:
UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : str = fwd_path + bwd_path
return path
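# Note (added comment, not in the original file): running one BFS from the start and one
# from the goal and stopping when the frontiers meet roughly halves the search depth,
# so bidirectional BFS visits on the order of O(b^(d/2)) nodes instead of O(b^d) for
# branching factor b and solution depth d, which is the motivation for the timing
# comparison in the __main__ block below.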
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase_ = (0, 0)
lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BreadthFirstSearch(init, goal)
lowerCamelCase_ = bfs.search()
lowerCamelCase_ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase_ = bd_bfs.search()
lowerCamelCase_ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 268 | 0 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCamelCase__ ( __A ):
"""simple docstring"""
def __get__( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
__UpperCAmelCase : Optional[int] = "__cached_" + self.fget.__name__
__UpperCAmelCase : Dict = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if cached is None:
__UpperCAmelCase : Union[str, Any] = self.fget(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return cached
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Any:
'''simple docstring'''
if is_torch_fx_proxy(A__ ):
return True
if is_torch_available():
import torch
if isinstance(A__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(A__ , np.ndarray )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Dict:
'''simple docstring'''
return isinstance(A__ , np.ndarray )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
return _is_numpy(A__ )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
import torch
return isinstance(A__ , torch.Tensor )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(A__ )
def lowerCamelCase ( _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
import torch
return isinstance(A__ , torch.device )
def lowerCamelCase ( _UpperCamelCase : str ) -> str:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(A__ )
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
import torch
if isinstance(A__ , A__ ):
if hasattr(A__ , A__ ):
__UpperCAmelCase : Tuple = getattr(A__ , A__ )
else:
return False
return isinstance(A__ , torch.dtype )
def lowerCamelCase ( _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(A__ )
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
import tensorflow as tf
return isinstance(A__ , tf.Tensor )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(A__ )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A__ , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(A__ )
return type(A__ ) == tf.Tensor
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Dict:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(A__ )
def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> str:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(A__ , jnp.ndarray )
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(A__ )
def lowerCamelCase ( _UpperCamelCase : Dict ) -> List[str]:
'''simple docstring'''
if isinstance(A__ , (dict, UserDict) ):
return {k: to_py_obj(A__ ) for k, v in obj.items()}
elif isinstance(A__ , (list, tuple) ):
return [to_py_obj(A__ ) for o in obj]
elif is_tf_tensor(A__ ):
return obj.numpy().tolist()
elif is_torch_tensor(A__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A__ ):
return np.asarray(A__ ).tolist()
elif isinstance(A__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
if isinstance(A__ , (dict, UserDict) ):
return {k: to_numpy(A__ ) for k, v in obj.items()}
elif isinstance(A__ , (list, tuple) ):
return np.array(A__ )
elif is_tf_tensor(A__ ):
return obj.numpy()
elif is_torch_tensor(A__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A__ ):
return np.asarray(A__ )
else:
return obj
class lowerCamelCase__ ( __A ):
"""simple docstring"""
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = fields(self )
# Safety and consistency checks
if not len(lowerCAmelCase_ ):
raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
__UpperCAmelCase : List[str] = getattr(self , class_fields[0].name )
__UpperCAmelCase : Optional[int] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__UpperCAmelCase : str = first_field.items()
__UpperCAmelCase : List[str] = True
else:
try:
__UpperCAmelCase : List[Any] = iter(lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = True
except TypeError:
__UpperCAmelCase : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCAmelCase_ ):
if (
not isinstance(lowerCAmelCase_ , (list, tuple) )
or not len(lowerCAmelCase_ ) == 2
or not isinstance(element[0] , lowerCAmelCase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__UpperCAmelCase : int = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__UpperCAmelCase : Any = element[1]
elif first_field is not None:
__UpperCAmelCase : Any = first_field
else:
for field in class_fields:
__UpperCAmelCase : Optional[int] = getattr(self , field.name )
if v is not None:
__UpperCAmelCase : Any = v
def __delitem__( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def lowerCamelCase__ ( self : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def lowerCamelCase__ ( self : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def lowerCamelCase__ ( self : Dict , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ):
'''simple docstring'''
raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Optional[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__UpperCAmelCase : Optional[int] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ )
super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ )
def __setitem__( self : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict ):
'''simple docstring'''
super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class lowerCamelCase__ ( __A , __A ):
"""simple docstring"""
@classmethod
def lowerCamelCase__ ( cls : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
raise ValueError(
 f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class lowerCamelCase__ ( __A ):
"""simple docstring"""
__a = """longest"""
__a = """max_length"""
__a = """do_not_pad"""
class lowerCamelCase__ ( __A ):
"""simple docstring"""
__a = """pt"""
__a = """tf"""
__a = """np"""
__a = """jax"""
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Any , UpperCamelCase : List[ContextManager] ):
'''simple docstring'''
__UpperCAmelCase : int = context_managers
__UpperCAmelCase : List[Any] = ExitStack()
def __enter__( self : Optional[int] ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(lowerCAmelCase_ )
def __exit__( self : Dict , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
self.stack.__exit__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Dict = infer_framework(A__ )
if framework == "tf":
__UpperCAmelCase : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase : int = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase : int = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = model_class.__name__
__UpperCAmelCase : str = infer_framework(A__ )
if framework == "tf":
__UpperCAmelCase : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase : Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase : Union[str, Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] = "" , _UpperCamelCase : Union[str, Any] = "." ) -> Optional[int]:
'''simple docstring'''
def _flatten_dict(_UpperCamelCase : Dict , _UpperCamelCase : List[str]="" , _UpperCamelCase : str="." ):
for k, v in d.items():
__UpperCAmelCase : Optional[int] = str(A__ ) + delimiter + str(A__ ) if parent_key else k
if v and isinstance(A__ , A__ ):
yield from flatten_dict(A__ , A__ , delimiter=A__ ).items()
else:
yield key, v
return dict(_flatten_dict(A__ , A__ , A__ ) )
@contextmanager
def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] = False ) -> Tuple:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict=None ) -> Optional[Any]:
'''simple docstring'''
if is_numpy_array(A__ ):
return np.transpose(A__ , axes=A__ )
elif is_torch_tensor(A__ ):
return array.T if axes is None else array.permute(*A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.transpose(A__ , perm=A__ )
elif is_jax_tensor(A__ ):
return jnp.transpose(A__ , axes=A__ )
else:
raise ValueError(f'''Type not supported for transpose: {type(A__ )}.''' )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
if is_numpy_array(A__ ):
return np.reshape(A__ , A__ )
elif is_torch_tensor(A__ ):
return array.reshape(*A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.reshape(A__ , A__ )
elif is_jax_tensor(A__ ):
return jnp.reshape(A__ , A__ )
else:
raise ValueError(f'''Type not supported for reshape: {type(A__ )}.''' )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict=None ) -> List[str]:
'''simple docstring'''
if is_numpy_array(A__ ):
return np.squeeze(A__ , axis=A__ )
elif is_torch_tensor(A__ ):
return array.squeeze() if axis is None else array.squeeze(dim=A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.squeeze(A__ , axis=A__ )
elif is_jax_tensor(A__ ):
return jnp.squeeze(A__ , axis=A__ )
else:
raise ValueError(f'''Type not supported for squeeze: {type(A__ )}.''' )
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if is_numpy_array(A__ ):
return np.expand_dims(A__ , A__ )
elif is_torch_tensor(A__ ):
return array.unsqueeze(dim=A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.expand_dims(A__ , axis=A__ )
elif is_jax_tensor(A__ ):
return jnp.expand_dims(A__ , axis=A__ )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A__ )}.''' )
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Dict:
'''simple docstring'''
if is_numpy_array(A__ ):
return np.size(A__ )
elif is_torch_tensor(A__ ):
return array.numel()
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.size(A__ )
elif is_jax_tensor(A__ ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A__ )}.''' )
def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(A__ , (tuple, list) ):
__UpperCAmelCase : Union[str, Any] = [f'''{repo_id}--{v}''' if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
__UpperCAmelCase : Dict = f'''{repo_id}--{value}'''
return auto_map
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
for base_class in inspect.getmro(A__ ):
__UpperCAmelCase : List[str] = base_class.__module__
__UpperCAmelCase : List[str] = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 115 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = MBartTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = vocab_file
UpperCAmelCase_ : str = False if not self.vocab_file else True
UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase_ : Tuple = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase_ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : List[str] = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ : str = src_lang
UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding:
UpperCAmelCase_ : List[Any] = src_lang
UpperCAmelCase_ : Tuple = tgt_lang
 return super().prepare_seq2seq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None:
UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : str = []
UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None:
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
| 268 | 0 |
import os
import sys
import unittest
lowercase__ : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase__ : Optional[Any] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowercase__ : List[str] = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = get_test_to_tester_mapping(lowerCAmelCase_ )
lowerCAmelCase = get_test_to_tester_mapping(lowerCAmelCase_ )
lowerCAmelCase = {"BertModelTest": "BertModelTester"}
lowerCAmelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = get_model_to_test_mapping(lowerCAmelCase_ )
lowerCAmelCase = get_model_to_test_mapping(lowerCAmelCase_ )
lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = get_model_to_tester_mapping(lowerCAmelCase_ )
lowerCAmelCase = get_model_to_tester_mapping(lowerCAmelCase_ )
lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
| 338 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn):
    # Map an activation-function name to the corresponding torch.nn module.
    # (`get_activation` is an assumed name for this helper; the original name was not preserved.)
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
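# For reference (added, not in the original file): SiLU/Swish is x * sigmoid(x), Mish is
# x * tanh(softplus(x)), and GELU is x * Phi(x) where Phi is the standard normal CDF;
# all three are smooth alternatives to ReLU.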
| 268 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 306 |
"""simple docstring"""
import copy
import os
import cv2 as cva  # OpenCV; aliased so the existing "cva." calls below keep working
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase_ :
def __init__( self : str ) -> Dict:
UpperCAmelCase_ : List[Any] = ""
UpperCAmelCase_ : int = ""
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 256
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : List[str] = 0
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 )
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : List[Any] = x[i] / self.k
self.sk += prk
UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase_ : Any = int(last % last )
UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase_ : Dict = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase_ : Any = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase_ : Tuple = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowerCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 268 | 0 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from the top-left to the bottom-right cell and print the solution grid."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking step: mark cell (i, j) and try the four neighbours."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
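# Illustrative run (not part of the original file): 0 is a free cell, 1 is an obstacle.
# >>> solve_maze([[0, 1, 0], [0, 0, 0], [1, 0, 0]])
# [1, 0, 0]
# [1, 1, 0]
# [0, 1, 1]
# True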
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : str = image_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Tuple = embeddings_size
UpperCAmelCase_ : Union[str, Any] = hidden_sizes
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : str = scope
UpperCAmelCase_ : str = len(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__magic_name__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : List[Any] = layer_type
UpperCAmelCase_ : int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ):
UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple()
def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
if isinstance(lowerCAmelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ):
UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
| 268 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a ( __A , unittest.TestCase ):
UpperCamelCase : Any = BertTokenizer
UpperCamelCase : int = BertTokenizerFast
UpperCamelCase : Optional[Any] = True
UpperCamelCase : List[str] = True
UpperCamelCase : int = filter_non_english
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_: Tuple =[
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
SCREAMING_SNAKE_CASE_: Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ="UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_: Any ="unwanted, running"
return input_text, output_text
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_: Tuple =tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCAmelCase_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] ="UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.tokenize(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: int =rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Any =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Optional[int] =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Dict =tokenizer.encode(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: List[Any] ="UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.tokenize(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Tuple =rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.encode(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =BasicTokenizer()
SCREAMING_SNAKE_CASE_: Dict ="a\n'll !!to?'d of, can't."
SCREAMING_SNAKE_CASE_: List[str] =["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
SCREAMING_SNAKE_CASE_: Tuple ={}
for i, token in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_: Optional[int] =i
SCREAMING_SNAKE_CASE_: Optional[Any] =WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
SCREAMING_SNAKE_CASE_: Any =tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Tuple =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Dict =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCamelCase__ ( self : Any ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_: str =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_: Tuple =tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , """do_lower_case""" ) else False
SCREAMING_SNAKE_CASE_: List[Any] =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =["的", "人", "有"]
SCREAMING_SNAKE_CASE_: Tuple ="".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_: Optional[Any] =True
SCREAMING_SNAKE_CASE_: Any =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Dict =tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Dict =tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Any =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: List[Any] =False
SCREAMING_SNAKE_CASE_: Optional[Any] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: List[Any] =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: int =tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_: Tuple =[
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 173 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase_ = get_logger(__name__)
class UpperCamelCase_ :
__magic_name__ = '''dummy_data'''
__magic_name__ = '''datasets'''
__magic_name__ = False
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : int = dataset_name
UpperCAmelCase_ : Optional[int] = cache_dir
UpperCAmelCase_ : Tuple = use_local_dummy_data
UpperCAmelCase_ : int = config
# download_callbacks take a single url as input
UpperCAmelCase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase_ : Dict = str(lowerCAmelCase_ )
# to be downloaded
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : int = None
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
if self._dummy_file is None:
UpperCAmelCase_ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase_ : Union[str, Any] = cached_path(
lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ )
return os.path.join(lowerCAmelCase_ , self.dummy_file_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self._bucket_url is None:
UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase_ : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase_ : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return path
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return {}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for single_url in single_urls:
download_callback(lowerCAmelCase_ )
else:
UpperCAmelCase_ : Tuple = single_urls
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls]
else:
UpperCAmelCase_ : Optional[int] = single_urls
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) )
UpperCAmelCase_ : int = value
# make sure that values are unique
if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()}
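        # Example (illustrative key names): two URLs that both end in "data.csv" would
        # collide on the same dummy path, so the key is appended to each value, giving
        # e.g. {"train": ".../data.csvtrain", "test": ".../data.csvtest"} instead of
        # two identical entries.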
return dummy_data_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
UpperCAmelCase_ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url )
UpperCAmelCase_ : Union[str, Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(lowerCAmelCase_ )
return dummy_data_list
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
def _iter_archive_members(lowerCAmelCase_ : Dict ):
# this preserves the order of the members inside the ZIP archive
UpperCAmelCase_ : str = Path(self.dummy_file ).parent
UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCAmelCase_ : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : str = [paths]
for path in paths:
if os.path.isfile(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(lowerCAmelCase_ ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''Return the price with the given tax rate added on top.'''
    return price * (1 + tax_rate)
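# Quick worked example: a price of 100 at a 25% tax rate gives 100 * (1 + 0.25) = 125.0.
assert price_plus_tax(100, 0.25) == 125.0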
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.2_5) = }")
print(f"{price_plus_tax(1_2_5.5_0, 0.0_5) = }") | 232 |
"""simple docstring"""
lowerCamelCase_ = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 13, SCREAMING_SNAKE_CASE_ = 64, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 128, SCREAMING_SNAKE_CASE_=[16, 32, 64, 128], SCREAMING_SNAKE_CASE_ = 7, SCREAMING_SNAKE_CASE_ = 4, SCREAMING_SNAKE_CASE_ = 37, SCREAMING_SNAKE_CASE_ = "gelu", SCREAMING_SNAKE_CASE_ = 0.1, SCREAMING_SNAKE_CASE_ = 0.1, SCREAMING_SNAKE_CASE_ = 10, SCREAMING_SNAKE_CASE_ = 0.02, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 128, SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2], SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = 2, ) -> Tuple:
UpperCamelCase : Dict = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Dict = image_size
UpperCamelCase : Union[str, Any] = patch_size
UpperCamelCase : List[Any] = num_channels
UpperCamelCase : Optional[int] = is_training
UpperCamelCase : Optional[int] = use_labels
UpperCamelCase : Optional[Any] = hidden_size
UpperCamelCase : Optional[Any] = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Optional[int] = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase : int = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Tuple = encoder_stride
UpperCamelCase : List[Any] = num_attention_outputs
UpperCamelCase : int = embed_dim
UpperCamelCase : Dict = embed_dim + 1
UpperCamelCase : List[Any] = resolution
UpperCamelCase : List[str] = depths
UpperCamelCase : Optional[Any] = hidden_sizes
UpperCamelCase : Optional[Any] = dim
UpperCamelCase : Any = mlp_expansion_ratio
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Optional[Any] = None
if self.use_labels:
UpperCamelCase : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> List[str]:
return EfficientFormerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase : List[str] = TFEfficientFormerModel(config=lowerCAmelCase_ )
UpperCamelCase : List[str] = model(lowerCAmelCase_, training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase : int = self.type_sequence_label_size
UpperCamelCase : Optional[int] = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
UpperCamelCase : Any = model(lowerCAmelCase_, labels=lowerCAmelCase_, training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : List[Any] = 1
UpperCamelCase : List[str] = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
UpperCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Optional[Any] = model(lowerCAmelCase_, labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
UpperCamelCase : List[Any] = config_and_inputs
UpperCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[Any] = False
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Tuple = TFEfficientFormerModelTester(self )
UpperCamelCase : Dict = ConfigTester(
self, config_class=lowerCAmelCase_, has_text_modality=lowerCAmelCase_, hidden_size=37 )
def snake_case_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def snake_case_ ( self ) -> Optional[int]:
pass
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(lowerCAmelCase_ )
UpperCamelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[Any] = [*signature.parameters.keys()]
UpperCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1], lowerCAmelCase_ )
def snake_case_ ( self ) -> int:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = model_class(lowerCAmelCase_ )
UpperCamelCase : Dict = model(**self._prepare_for_class(lowerCAmelCase_, lowerCAmelCase_ ), training=lowerCAmelCase_ )
UpperCamelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase : List[str] = getattr(
self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ), lowerCAmelCase_ )
if hasattr(self.model_tester, 'encoder_seq_length' ):
UpperCamelCase : Tuple = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, 'chunk_length' ) and self.model_tester.chunk_length > 1:
UpperCamelCase : int = seq_length * self.model_tester.chunk_length
else:
UpperCamelCase : List[str] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
if config.is_encoder_decoder:
UpperCamelCase : List[str] = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_, (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ), lowerCAmelCase_ )
UpperCamelCase : Dict = getattr(self.model_tester, 'seq_length', lowerCAmelCase_ )
UpperCamelCase : List[str] = getattr(self.model_tester, 'decoder_seq_length', lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [decoder_seq_length, self.model_tester.hidden_size], )
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = True
check_hidden_states_output(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Any:
UpperCamelCase : Any = super()._prepare_for_class(lowerCAmelCase_, lowerCAmelCase_, return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def snake_case_ ( self ) -> Optional[int]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Dict = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[Any] = True
UpperCamelCase : Optional[Any] = getattr(self.model_tester, 'seq_length', lowerCAmelCase_ )
UpperCamelCase : Optional[Any] = getattr(self.model_tester, 'encoder_seq_length', lowerCAmelCase_ )
UpperCamelCase : Tuple = getattr(self.model_tester, 'key_length', lowerCAmelCase_ )
UpperCamelCase : Dict = getattr(self.model_tester, 'chunk_length', lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester, 'num_hashes' ):
UpperCamelCase : Tuple = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Any = model_class(lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase_, lowerCAmelCase_ ), training=lowerCAmelCase_ )
UpperCamelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ), self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase : Any = True
UpperCamelCase : Tuple = model_class(lowerCAmelCase_ )
UpperCamelCase : Dict = model(**self._prepare_for_class(lowerCAmelCase_, lowerCAmelCase_ ), training=lowerCAmelCase_ )
UpperCamelCase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ), self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
def snake_case_ ( self ) -> Optional[Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCamelCase : List[Any] = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCamelCase : str = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCamelCase : List[Any] = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def UpperCamelCase ( ) -> int:
UpperCamelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> Optional[int]:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : Optional[int] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
UpperCamelCase : Optional[int] = self.default_image_processor
UpperCamelCase : Optional[int] = prepare_img()
UpperCamelCase : Optional[Any] = image_processor(images=lowerCAmelCase_, return_tensors='tf' )
# forward pass
UpperCamelCase : Optional[Any] = model(**lowerCAmelCase_, training=lowerCAmelCase_ )
# verify the logits
UpperCamelCase : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCAmelCase_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
UpperCamelCase : int = self.default_image_processor
UpperCamelCase : List[str] = prepare_img()
UpperCamelCase : List[str] = image_processor(images=lowerCAmelCase_, return_tensors='tf' )
# forward pass
UpperCamelCase : Union[str, Any] = model(**lowerCAmelCase_, training=lowerCAmelCase_ )
# verify the logits
UpperCamelCase : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase_ )
UpperCamelCase : List[str] = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCAmelCase_, atol=1e-4 ) )
| 119 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Optional[Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : int = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : List[str] = relative_attention
UpperCAmelCase_ : List[Any] = position_biased_input
UpperCAmelCase_ : Dict = pos_att_type
UpperCAmelCase_ : Optional[Any] = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Tuple = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.get_config()
UpperCAmelCase_ : int = 300
return config
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str:
UpperCAmelCase_ : Optional[int] = self.num_labels
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]:
UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Any = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__magic_name__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : int = DebertaModelTester(self )
UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ (unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 268 | 0 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set())
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        '''Mimics a metric entry as returned by the Hub listing.'''
        def __init__(self, metric_id):
            self.id = metric_id
    class HfhMock:
        '''Stands in for the huggingface_hub module used by datasets.inspect.'''
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
        def list_metrics(self):
            return self._metrics
    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock())
@pytest.mark.parametrize(
'''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate'''):
        func(*args)
| 174 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 ,20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
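# Note: each loop scans products of four adjacent cells in one direction; the index
# bounds keep the window inside the 20x20 grid, e.g. in the "right" pass j only runs
# to 16 so the last factor l[i][j + 3] is still in column 19, and the anti-diagonal
# pass starts at j = 3 so that l[i + 3][j - 3] never goes below column 0.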
if __name__ == "__main__":
print(solution())
| 268 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
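    # With this lazy-module pattern the modeling submodule is only imported when one of
    # its exported names is first accessed, so for example
    # `from transformers.models.xmod import XmodModel` resolves XmodModel on demand
    # rather than at package import time.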
| 187 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def snake_case ( A__ ):
UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 )
if "base" in model_name:
UpperCAmelCase_ : Any = 6
UpperCAmelCase_ : Optional[Any] = 1_28
UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : int = 1_92
UpperCAmelCase_ : List[Any] = (2, 2, 18, 2)
UpperCAmelCase_ : int = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
UpperCAmelCase_ : str = window_size
UpperCAmelCase_ : Any = embed_dim
UpperCAmelCase_ : int = depths
UpperCAmelCase_ : Any = num_heads
return config
def snake_case ( A__ ):
if "encoder.mask_token" in name:
UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" )
if "attn.proj" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" )
if "norm1" in name:
UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" )
if name == "encoder.norm.weight":
UpperCAmelCase_ : List[str] = "layernorm.weight"
if name == "encoder.norm.bias":
UpperCAmelCase_ : int = "layernorm.bias"
if "decoder" in name:
pass
else:
UpperCAmelCase_ : Any = "swin." + name
return name
def snake_case ( A__ ,A__ ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase_ : Optional[int] = key.split("." )
UpperCAmelCase_ : str = int(key_split[2] )
UpperCAmelCase_ : Union[str, Any] = int(key_split[4] )
UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ : List[Any] = val[:dim, :]
UpperCAmelCase_ : str = val[
dim : dim * 2, :
]
UpperCAmelCase_ : str = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[
:dim
]
UpperCAmelCase_ : str = val[
dim : dim * 2
]
UpperCAmelCase_ : Optional[Any] = val[
-dim:
]
else:
UpperCAmelCase_ : Tuple = val
return orig_state_dict
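# A small self-contained illustration (shapes assumed for the sketch, not read from a real
# checkpoint) of the qkv split performed above: a fused projection of shape (3 * dim, dim)
# is cut into equal query / key / value blocks along the first axis. It reuses the torch
# import at the top of this script.
_dim = 4  # hypothetical head size, for illustration only
_qkv_weight = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_query = _qkv_weight[:_dim, :]
_key = _qkv_weight[_dim : _dim * 2, :]
_value = _qkv_weight[-_dim:, :]
assert _query.shape == _key.shape == _value.shape == (_dim, _dim)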
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"]
UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ )
UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ )
model.eval()
UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ )
model.load_state_dict(A__ )
UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} )
UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw )
UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" )
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**A__ ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 268 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = int(A__ )
assert noofclusters < len(A__ )
# Find out the dimensionality
SCREAMING_SNAKE_CASE_ = len(vectors[0] )
# Will help select random centroids from among the available vectors
SCREAMING_SNAKE_CASE_ = list(range(len(A__ ) ) )
shuffle(A__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
SCREAMING_SNAKE_CASE_ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
SCREAMING_SNAKE_CASE_ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
SCREAMING_SNAKE_CASE_ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(A__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
SCREAMING_SNAKE_CASE_ = tf.placeholder('float64' , [dim] )
SCREAMING_SNAKE_CASE_ = []
for centroid in centroids:
cent_assigns.append(tf.assign(A__ , A__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
SCREAMING_SNAKE_CASE_ = [tf.Variable(0 ) for i in range(len(A__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
SCREAMING_SNAKE_CASE_ = tf.placeholder('int32' )
SCREAMING_SNAKE_CASE_ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(A__ , A__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
SCREAMING_SNAKE_CASE_ = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
SCREAMING_SNAKE_CASE_ = tf.reduce_mean(A__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
SCREAMING_SNAKE_CASE_ = tf.placeholder('float' , [dim] )
SCREAMING_SNAKE_CASE_ = tf.placeholder('float' , [dim] )
SCREAMING_SNAKE_CASE_ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(A__ , A__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
SCREAMING_SNAKE_CASE_ = tf.placeholder('float' , [noofclusters] )
SCREAMING_SNAKE_CASE_ = tf.argmin(A__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
SCREAMING_SNAKE_CASE_ = tf.initialize_all_variables()
# Initialize all variables
sess.run(A__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
SCREAMING_SNAKE_CASE_ = 1_00
for _ in range(A__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(A__ ) ):
SCREAMING_SNAKE_CASE_ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
SCREAMING_SNAKE_CASE_ = [
sess.run(A__ , feed_dict={va: vect, va: sess.run(A__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
SCREAMING_SNAKE_CASE_ = sess.run(
A__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(A__ ):
# Collect all the vectors assigned to this cluster
SCREAMING_SNAKE_CASE_ = [
vectors[i]
for i in range(len(A__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
SCREAMING_SNAKE_CASE_ = sess.run(
A__ , feed_dict={mean_input: array(A__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
SCREAMING_SNAKE_CASE_ = sess.run(A__ )
SCREAMING_SNAKE_CASE_ = sess.run(A__ )
return centroids, assignments | 225 |
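# The graph above uses classic TF1-era idioms (tf.Session, tf.placeholder) and will not run
# unchanged on TensorFlow 2. A minimal NumPy sketch of the same Expectation-Maximization
# loop, on made-up toy data, looks like this:
import numpy as _np
_points = _np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
_centroids = _points[:2].copy()  # assume 2 clusters, seeded from the first two points
for _ in range(10):
    # Expectation: assign every point to its nearest centroid.
    _dists = _np.linalg.norm(_points[:, None, :] - _centroids[None, :, :], axis=-1)
    _labels = _dists.argmin(axis=1)
    # Maximization: move each centroid to the mean of its assigned points.
    for _k in range(len(_centroids)):
        if _np.any(_labels == _k):
            _centroids[_k] = _points[_labels == _k].mean(axis=0)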
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''rwkv'''
__magic_name__ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : List[str] = context_length
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[Any] = rescale_every
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[str] = bos_token_id
UpperCAmelCase_ : Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
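# A brief usage sketch; it assumes the class is exported from transformers as RwkvConfig
# (the keyword names come from the __init__ above, the sizes are illustrative only).
from transformers import RwkvConfig
_rwkv_config = RwkvConfig(vocab_size=50_277, context_length=1_024, hidden_size=768, num_hidden_layers=12)
assert _rwkv_config.attention_hidden_size == 768  # falls back to hidden_size when left as None
assert _rwkv_config.intermediate_size == 4 * 768  # falls back to 4 * hidden_size when left as None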
| 268 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __a ( _SCREAMING_SNAKE_CASE = 3 ) ->List[Any]:
if isinstance(A__ , A__ ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(A__ ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
a__: Any = QuantumRegister(A__ , 'qr' )
a__: str = ClassicalRegister(A__ , 'cr' )
a__: int = QuantumCircuit(A__ , A__ )
a__: Dict = number_of_qubits
for i in range(A__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(A__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , A__ , A__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(A__ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(A__ , A__ )
# simulate with 10000 shots
a__: Dict = Aer.get_backend('qasm_simulator' )
a__: str = execute(A__ , A__ , shots=10000 )
return job.result().get_counts(A__ )
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
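# A small cross-check of the circuit above, toy-sized and independent of qiskit (it reuses
# the numpy import at the top of this file): the QFT of the all-zeros basis state is a
# uniform superposition, so measuring it should return roughly shots / 2**n counts for
# every bitstring.
_n = 3
_N = 2 ** _n
_qft = np.array([[np.exp(2j * np.pi * j * k / _N) for k in range(_N)] for j in range(_N)]) / np.sqrt(_N)
_state = np.zeros(_N)
_state[0] = 1.0  # the |000> basis state
_probabilities = np.abs(_qft @ _state) ** 2
assert np.allclose(_probabilities, 1 / _N)  # every outcome is equally likely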
| 290 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 268 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : str , UpperCamelCase : Any , UpperCamelCase : Any=13 , UpperCamelCase : Tuple=2 , UpperCamelCase : Any=24 , UpperCamelCase : Tuple=16 , UpperCamelCase : int=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : int=37 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : List[Any]=10 , UpperCamelCase : int=0.02 , UpperCamelCase : Any=None , UpperCamelCase : str=2 , UpperCamelCase : int=2 , ):
'''simple docstring'''
__UpperCAmelCase : str = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : List[Any] = patch_size
__UpperCAmelCase : Dict = max_length
__UpperCAmelCase : str = num_mel_bins
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : int = scope
__UpperCAmelCase : Dict = frequency_stride
__UpperCAmelCase : Dict = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__UpperCAmelCase : List[str] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__UpperCAmelCase : Optional[Any] = (self.max_length - self.patch_size) // self.time_stride + 1
__UpperCAmelCase : str = frequency_out_dimension * time_out_dimension
__UpperCAmelCase : Any = num_patches + 2
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_values, labels
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ASTModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__UpperCAmelCase : Dict = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase : Optional[Any] = config_and_inputs
__UpperCAmelCase : int = {"input_values": input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__a = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__a = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : str ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ASTModelTester(self )
__UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["input_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = ASTModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCamelCase ( ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
__UpperCAmelCase : Tuple = torchaudio.load(A__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.default_feature_extractor
__UpperCAmelCase : Dict = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(lowerCAmelCase_ )
__UpperCAmelCase : Tuple = self.default_feature_extractor
__UpperCAmelCase : List[str] = prepare_audio()
__UpperCAmelCase : Any = audio.squeeze().numpy()
__UpperCAmelCase : int = feature_extractor(lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ )
# verify the logits
__UpperCAmelCase : Optional[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__UpperCAmelCase : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 115 |
"""simple docstring"""
from __future__ import annotations
class UpperCamelCase_ :
def __init__( self : Any , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : Any = data
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def snake_case ( A__ ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def snake_case ( A__ ):
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def snake_case ( A__ ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def snake_case ( ): # Main function for testing.
UpperCAmelCase_ : List[str] = Node(1 )
UpperCAmelCase_ : Any = Node(2 )
UpperCAmelCase_ : Optional[Any] = Node(3 )
UpperCAmelCase_ : Union[str, Any] = Node(4 )
UpperCAmelCase_ : int = Node(5 )
UpperCAmelCase_ : Optional[int] = Node(6 )
UpperCAmelCase_ : Any = Node(7 )
UpperCAmelCase_ : List[str] = Node(8 )
UpperCAmelCase_ : List[Any] = Node(9 )
print(is_full_binary_tree(A__ ) )
print(depth_of_tree(A__ ) )
print("Tree is: " )
display(A__ )
if __name__ == "__main__":
main()
| 268 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowercase_ :
"""simple docstring"""
UpperCAmelCase_ : int = 42
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Optional[int] = None
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
lowerCAmelCase = Node(1 )
lowerCAmelCase = Node(2 )
lowerCAmelCase = Node(3 )
lowerCAmelCase = Node(4 )
lowerCAmelCase = Node(5 )
return tree
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[Any]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[Any]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict:
lowerCAmelCase = []
if root is None:
return output
lowerCAmelCase = deque([root] )
while process_queue:
lowerCAmelCase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Any:
lowerCAmelCase = []
def populate_output(snake_case__ , snake_case__ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(A__ , A__ )
return output
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Tuple:
lowerCAmelCase = []
def populate_output(snake_case__ , snake_case__ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(A__ , A__ )
return output
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
if root is None:
return []
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = height(A__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(A__ , A__ ) )
lowerCAmelCase = 1
else:
output.append(get_nodes_from_right_to_left(A__ , A__ ) )
lowerCAmelCase = 0
return output
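# A self-contained sanity check of the zigzag idea with an explicitly wired five-node tree
# (kept separate from the helpers above): level 1 is read left-to-right, level 2
# right-to-left, level 3 left-to-right again.
class _TinyNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right
def _tiny_zigzag(root):
    levels, level = [], [root]
    while level:
        levels.append([node.data for node in level])
        level = [child for node in level for child in (node.left, node.right) if child]
    return [vals if i % 2 == 0 else vals[::-1] for i, vals in enumerate(levels)]
_tiny_root = _TinyNode(1, _TinyNode(2, _TinyNode(4), _TinyNode(5)), _TinyNode(3))
assert _tiny_zigzag(_tiny_root) == [[1], [3, 2], [4, 5]]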
def SCREAMING_SNAKE_CASE_ ( ) -> str: # Main function for testing.
lowerCAmelCase = make_tree()
print(f"In-order Traversal: {inorder(A__ )}" )
print(f"Pre-order Traversal: {preorder(A__ )}" )
print(f"Post-order Traversal: {postorder(A__ )}" , '''\n''' )
print(f"Height of Tree: {height(A__ )}" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(A__ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(A__ ) + 1 ):
print(f"Level {level}:" , get_nodes_from_left_to_right(A__ , level=A__ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 338 |
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
if index == number_of_items:
return 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack(
A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 )
return max(A__ ,A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
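# A worked example of the same take-or-skip recursion, written standalone so it can be run
# directly (the item data below is made up for illustration).
def _knapsack_sketch(weights, values, capacity, index=0):
    if index == len(weights):
        return 0
    skip_item = _knapsack_sketch(weights, values, capacity, index + 1)
    take_item = 0
    if weights[index] <= capacity:
        take_item = values[index] + _knapsack_sketch(weights, values, capacity - weights[index], index + 1)
    return max(skip_item, take_item)
# Items (weight, value): (1, 1), (3, 4), (4, 5), (5, 7) with capacity 7 -> best is 4 + 5 = 9.
assert _knapsack_sketch([1, 3, 4, 5], [1, 4, 5, 7], 7) == 9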
| 268 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCamelCase = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
UpperCamelCase = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
UpperCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
return x[0]
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_letter_count(A__ )
_SCREAMING_SNAKE_CASE = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(A__ )
_SCREAMING_SNAKE_CASE = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find ,reverse=A__ )
_SCREAMING_SNAKE_CASE = "".join(freq_to_letter[freq] )
_SCREAMING_SNAKE_CASE = list(freq_to_letter_str.items() )
freq_pairs.sort(key=A__ ,reverse=A__ )
_SCREAMING_SNAKE_CASE = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(A__ )
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_frequency_order(A__ )
_SCREAMING_SNAKE_CASE = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
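# A quick illustration of the frequency idea the match score relies on (the sample sentence
# is made up): in ordinary English text the most common letters tend to come from ETAOIN,
# which is what lets this scorer separate plausible decryptions from noise.
from collections import Counter
_sample = "THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG AND THEN TAKES A NAP"
_counts = Counter(ch for ch in _sample if ch.isalpha())
print(_counts.most_common(6))  # common English letters such as E, A, T, O, N near the top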
| 306 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (__A ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = proj_size
UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ )
UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output
UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase_ (nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Optional[Any] = config.hidden_size
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ )
] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str:
for block in self.blocks:
UpperCAmelCase_ : int = block(lowerCAmelCase_ )
return hidden_states
| 268 | 0 |
'''simple docstring'''
import math
def _A ( snake_case ) -> Union[str, Any]:
_lowercase : Optional[int] = []
_lowercase : Optional[int] = 2
_lowercase : Tuple = int(math.sqrt(A__ ) ) # Size of every segment
_lowercase : Optional[int] = [True] * (end + 1)
_lowercase : Union[str, Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
_lowercase : str = False
start += 1
prime += in_prime
_lowercase : Union[str, Any] = end + 1
_lowercase : str = min(2 * end , A__ )
while low <= n:
_lowercase : Optional[int] = [True] * (high - low + 1)
for each in in_prime:
_lowercase : Dict = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
_lowercase : Dict = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
_lowercase : Optional[Any] = high + 1
_lowercase : str = min(high + end , A__ )
return prime
print(sieve(10**6))
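# A tiny cross-check, for illustration only: the primes below 30 from a plain one-pass
# sieve, which the segmented version above must reproduce while keeping only
# sqrt(n)-sized segments in memory at a time.
def _plain_sieve(limit):
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if flags[p]:
            for multiple in range(p * p, limit + 1, p):
                flags[multiple] = False
    return [i for i, is_prime in enumerate(flags) if is_prime]
assert _plain_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]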
| 250 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCamelCase_ (__A ):
def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]:
UpperCAmelCase_ : str = {}
if top_k is not None:
UpperCAmelCase_ : List[str] = top_k
return {}, {}, postprocess_params
def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any:
UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ )
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : int = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ )
elif self.framework == "tf":
UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCAmelCase_ : int = scores.tolist()
UpperCAmelCase_ : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
| 268 | 0 |
"""simple docstring"""
import numpy
class a :
def __init__( self : Optional[int] , lowerCAmelCase : numpy.ndarray , lowerCAmelCase : numpy.ndarray ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE_: Tuple =numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE_: Union[str, Any] =numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE_: Union[str, Any] =numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE_: str =output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE_: Tuple =numpy.zeros(output_array.shape )
def lowerCamelCase__ ( self : Dict ) -> numpy.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE_: int =sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE_: Optional[Any] =sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowerCamelCase__ ( self : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE_: Tuple =numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE_: Optional[int] =numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : numpy.ndarray , lowerCAmelCase : int , lowerCAmelCase : bool ) -> None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE_: int =self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE_: Union[str, Any] =numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : numpy.ndarray ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =input_arr
SCREAMING_SNAKE_CASE_: List[Any] =sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE_: Tuple =sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE_: List[Any] =sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __magic_name__ ( lowercase ):
return 1 / (1 + numpy.exp(-value ))
def __magic_name__ ( lowercase ):
return (value) * (1 - (value))
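# A quick numerical check (toy inputs chosen arbitrarily, reusing the numpy import at the
# top of this file) that the identity used by the derivative helper above holds:
# sigma'(x) = sigma(x) * (1 - sigma(x)).
_x = numpy.linspace(-3.0, 3.0, 7)
_s = 1.0 / (1.0 + numpy.exp(-_x))
_eps = 1e-6
_finite_difference = (1.0 / (1.0 + numpy.exp(-(_x + _eps))) - _s) / _eps
assert numpy.allclose(_finite_difference, _s * (1.0 - _s), atol=1e-4)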
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[str] =numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE_: Union[str, Any] =numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE_: int =TwoHiddenLayerNeuralNetwork(
input_array=A__ , output_array=A__ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=A__ , iterations=10 , give_loss=A__ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 173 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''detr'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : str = use_timm_backbone
UpperCAmelCase_ : Optional[Any] = backbone_config
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Dict = num_queries
UpperCAmelCase_ : str = d_model
UpperCAmelCase_ : Any = encoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Optional[int] = encoder_attention_heads
UpperCAmelCase_ : List[str] = decoder_ffn_dim
UpperCAmelCase_ : Tuple = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[Any] = dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : List[str] = activation_function
UpperCAmelCase_ : Optional[int] = init_std
UpperCAmelCase_ : Union[str, Any] = init_xavier_std
UpperCAmelCase_ : List[str] = encoder_layerdrop
UpperCAmelCase_ : Tuple = decoder_layerdrop
UpperCAmelCase_ : str = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : Optional[int] = position_embedding_type
UpperCAmelCase_ : List[str] = backbone
UpperCAmelCase_ : int = use_pretrained_backbone
UpperCAmelCase_ : Any = dilation
# Hungarian matcher
UpperCAmelCase_ : str = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : List[str] = mask_loss_coefficient
UpperCAmelCase_ : Dict = dice_loss_coefficient
UpperCAmelCase_ : Any = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self.d_model
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]:
return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]:
UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return 12
| 268 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=__A):
'''simple docstring'''
_A = ['onnx']
def __init__( self :Optional[int] , *a :Dict , **a :Optional[Any] ) -> List[Any]:
requires_backends(self , ["onnx"] )
@classmethod
def _lowerCamelCase ( cls :Tuple , *a :List[Any] , **a :List[str] ) -> Dict:
requires_backends(cls , ["onnx"] )
@classmethod
def _lowerCamelCase ( cls :Union[str, Any] , *a :Dict , **a :Tuple ) -> Optional[Any]:
requires_backends(cls , ["onnx"] ) | 232 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (__A , unittest.TestCase ):
__magic_name__ = BertTokenizer
__magic_name__ = BertTokenizerFast
__magic_name__ = True
__magic_name__ = True
__magic_name__ = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running"
UpperCAmelCase_ : Any = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = BasicTokenizer()
UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't."
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ : Tuple = {}
for i, token in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = i
UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
UpperCAmelCase_ : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ["的", "人", "有"]
UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ : Tuple = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 0 |
"""simple docstring"""
__snake_case : Union[str, Any] = 8.314_462 # universal gas constant R, in J mol-1 K-1
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod() | 269 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
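    # each line of the input file holds "base,exponent"; compare base**exponent
    # values via exponent * log(base) so the huge powers never have to be evaluated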
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
if __name__ == "__main__":
print(solution()) | 269 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : int = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'fnet'
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Any=3_2000 , _SCREAMING_SNAKE_CASE: Optional[int]=768 , _SCREAMING_SNAKE_CASE: Optional[Any]=12 , _SCREAMING_SNAKE_CASE: Optional[Any]=3072 , _SCREAMING_SNAKE_CASE: List[str]="gelu_new" , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Any=512 , _SCREAMING_SNAKE_CASE: List[str]=4 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: str=1e-12 , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: List[str]=512 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: str=1 , _SCREAMING_SNAKE_CASE: Tuple=2 , **_SCREAMING_SNAKE_CASE: str , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Union[str, Any] = max_position_embeddings
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : Optional[Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : str = hidden_dropout_prob
__lowerCAmelCase : Optional[int] = initializer_range
__lowerCAmelCase : List[Any] = type_vocab_size
__lowerCAmelCase : List[Any] = layer_norm_eps
__lowerCAmelCase : int = use_tpu_fourier_optimizations
__lowerCAmelCase : int = tpu_short_seq_length | 269 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
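    # leave iterables untouched, otherwise duplicate the scalar into an (x, x) pair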
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A__ :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.abs((a - b)).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = after_output[0]
__lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
__lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
pt_model.to(_SCREAMING_SNAKE_CASE)
pt_model.eval()
# prepare inputs
__lowerCAmelCase : Union[str, Any] = inputs_dict
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
pt_model_loaded.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
__lowerCAmelCase : str = config_inputs_dict.pop("text_config")
__lowerCAmelCase : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = after_outputs[0]
__lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = 13
__lowerCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4])
__lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = FlaxViTModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxViTModelTester(self)
__lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
__lowerCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Tuple = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = text_config_and_inputs
        # bundle the configs and model inputs consumed by the shared dual encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : str = random_attention_mask([batch_size, 4])
__lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : int = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxCLIPVisionModelTester(self)
__lowerCAmelCase : str = FlaxBertModelTester(self)
__lowerCAmelCase : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Any = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = text_config_and_inputs
        # bundle the configs and model inputs consumed by the shared dual encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
__lowerCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
__lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__lowerCAmelCase : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase : List[str] = np.array([[1.228_4727, 0.310_4122]])
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3)) | 269 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : List[Any] = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'rag'
SCREAMING_SNAKE_CASE = True
def __init__( self: Any , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Tuple=None , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: Any=None , _SCREAMING_SNAKE_CASE: List[Any]=" / " , _SCREAMING_SNAKE_CASE: List[Any]=" // " , _SCREAMING_SNAKE_CASE: int=5 , _SCREAMING_SNAKE_CASE: List[Any]=300 , _SCREAMING_SNAKE_CASE: str=768 , _SCREAMING_SNAKE_CASE: List[str]=8 , _SCREAMING_SNAKE_CASE: Tuple="wiki_dpr" , _SCREAMING_SNAKE_CASE: Any="train" , _SCREAMING_SNAKE_CASE: Optional[Any]="compressed" , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: Tuple=None , **_SCREAMING_SNAKE_CASE: Dict , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , prefix=_SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__lowerCAmelCase : int = kwargs.pop("question_encoder")
__lowerCAmelCase : Any = question_encoder_config.pop("model_type")
__lowerCAmelCase : Optional[Any] = kwargs.pop("generator")
__lowerCAmelCase : List[Any] = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
__lowerCAmelCase : Union[str, Any] = AutoConfig.for_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = AutoConfig.for_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = reduce_loss
__lowerCAmelCase : int = label_smoothing
__lowerCAmelCase : Union[str, Any] = exclude_bos_score
__lowerCAmelCase : List[str] = do_marginalize
__lowerCAmelCase : Optional[int] = title_sep
__lowerCAmelCase : Tuple = doc_sep
__lowerCAmelCase : Optional[Any] = n_docs
__lowerCAmelCase : List[str] = max_combined_length
__lowerCAmelCase : Optional[int] = dataset
__lowerCAmelCase : str = dataset_split
__lowerCAmelCase : str = index_name
__lowerCAmelCase : str = retrieval_vector_size
__lowerCAmelCase : Union[str, Any] = retrieval_batch_size
__lowerCAmelCase : Optional[Any] = passages_path
__lowerCAmelCase : List[str] = index_path
__lowerCAmelCase : str = use_dummy_dataset
__lowerCAmelCase : Optional[int] = output_retrieved
__lowerCAmelCase : int = do_deduplication
__lowerCAmelCase : List[str] = use_cache
if self.forced_eos_token_id is None:
__lowerCAmelCase : Optional[Any] = getattr(self.generator , "forced_eos_token_id" , _SCREAMING_SNAKE_CASE)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[str] , _SCREAMING_SNAKE_CASE: PretrainedConfig , _SCREAMING_SNAKE_CASE: PretrainedConfig , **_SCREAMING_SNAKE_CASE: Tuple) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase : Tuple = copy.deepcopy(self.__dict__)
__lowerCAmelCase : str = self.question_encoder.to_dict()
__lowerCAmelCase : Optional[Any] = self.generator.to_dict()
__lowerCAmelCase : Any = self.__class__.model_type
return output | 269 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list:
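    # two-way merge of the already-sorted slices input_list[low:mid] and
    # input_list[mid:high + 1]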
__lowerCAmelCase : Dict = []
__lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__lowerCAmelCase : int = result + left + right
return input_list
def _lowercase ( __snake_case ) -> list:
if len(__snake_case ) <= 1:
return input_list
__lowerCAmelCase : int = list(__snake_case )
# iteration for two-way merging
__lowerCAmelCase : Optional[int] = 2
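    # p is the size of the block being merged (two runs of p // 2 each);
    # it doubles after every pass, giving a bottom-up merge sort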
while p <= len(__snake_case ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 ,len(__snake_case ) ,__snake_case ):
__lowerCAmelCase : Union[str, Any] = i
__lowerCAmelCase : Tuple = i + p - 1
__lowerCAmelCase : Optional[Any] = (low + high + 1) // 2
__lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case )
# final merge of last two parts
if p * 2 >= len(__snake_case ):
__lowerCAmelCase : Optional[Any] = i
__lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__snake_case : Optional[int] = []
else:
__snake_case : int = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted)) | 269 | 1 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _lowercase ( __snake_case ,__snake_case ) -> Optional[Any]:
__lowerCAmelCase : Union[str, Any] = XCLIPTextConfig()
# derive patch size from model name
__lowerCAmelCase : Union[str, Any] = model_name.find("patch" )
__lowerCAmelCase : List[str] = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
__lowerCAmelCase : List[str] = XCLIPVisionConfig(patch_size=__snake_case ,num_frames=__snake_case )
if "large" in model_name:
__lowerCAmelCase : Union[str, Any] = 768
__lowerCAmelCase : List[Any] = 3_072
__lowerCAmelCase : int = 12
__lowerCAmelCase : Union[str, Any] = 1_024
__lowerCAmelCase : Union[str, Any] = 4_096
__lowerCAmelCase : Dict = 16
__lowerCAmelCase : Union[str, Any] = 24
__lowerCAmelCase : Optional[Any] = 768
__lowerCAmelCase : Optional[Any] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__lowerCAmelCase : int = 336
__lowerCAmelCase : Dict = XCLIPConfig.from_text_vision_configs(__snake_case ,__snake_case )
if "large" in model_name:
__lowerCAmelCase : List[str] = 768
return config
def _lowercase ( __snake_case ) -> Optional[Any]:
# text encoder
if name == "token_embedding.weight":
__lowerCAmelCase : Union[str, Any] = name.replace("token_embedding.weight" ,"text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
__lowerCAmelCase : List[Any] = name.replace("positional_embedding" ,"text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
__lowerCAmelCase : Any = name.replace("ln_1" ,"layer_norm1" )
if "ln_2" in name:
__lowerCAmelCase : Dict = name.replace("ln_2" ,"layer_norm2" )
if "c_fc" in name:
__lowerCAmelCase : Optional[int] = name.replace("c_fc" ,"fc1" )
if "c_proj" in name:
__lowerCAmelCase : List[str] = name.replace("c_proj" ,"fc2" )
if name.startswith("transformer.resblocks" ):
__lowerCAmelCase : str = name.replace("transformer.resblocks" ,"text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
__lowerCAmelCase : List[Any] = name.replace("attn.out_proj" ,"self_attn.out_proj" )
if "ln_final" in name:
__lowerCAmelCase : List[Any] = name.replace("ln_final" ,"text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
__lowerCAmelCase : Union[str, Any] = name.replace("visual.class_embedding" ,"vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
__lowerCAmelCase : List[Any] = name.replace("visual.positional_embedding" ,"vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
__lowerCAmelCase : Optional[int] = name.replace("visual.transformer.resblocks" ,"vision_model.encoder.layers" )
if "visual.conv1" in name:
__lowerCAmelCase : Dict = name.replace("visual.conv1" ,"vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
__lowerCAmelCase : int = name.replace("visual.ln_pre" ,"vision_model.pre_layernorm" )
if "visual.ln_post" in name:
__lowerCAmelCase : Optional[Any] = name.replace("visual.ln_post" ,"vision_model.post_layernorm" )
if "visual.proj" in name:
__lowerCAmelCase : Dict = name.replace("visual.proj" ,"visual_projection.weight" )
if "text_projection" in name:
__lowerCAmelCase : Optional[int] = name.replace("text_projection" ,"text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
__lowerCAmelCase : List[Any] = name.replace("prompts_visual_proj" ,"prompts_visual_projection" )
if "prompts_visual_ln" in name:
__lowerCAmelCase : int = name.replace("prompts_visual_ln" ,"prompts_visual_layernorm" )
    # mit (multiframe integration transformer)
if name == "mit.positional_embedding":
__lowerCAmelCase : Union[str, Any] = name.replace("positional" ,"position" )
if name.startswith("mit.resblocks" ):
__lowerCAmelCase : str = name.replace("mit.resblocks" ,"mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
__lowerCAmelCase : int = name.replace("prompts_generator.norm" ,"prompts_generator.layernorm" )
return name
def _lowercase ( __snake_case ,__snake_case ) -> List[str]:
for key in orig_state_dict.copy().keys():
__lowerCAmelCase : int = orig_state_dict.pop(__snake_case )
if "attn.in_proj" in key:
__lowerCAmelCase : str = key.split("." )
if key.startswith("visual" ):
__lowerCAmelCase : Optional[Any] = key_split[3]
__lowerCAmelCase : List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__lowerCAmelCase : List[str] = val[
:dim, :
]
__lowerCAmelCase : int = val[
dim : dim * 2, :
]
__lowerCAmelCase : List[str] = val[
-dim:, :
]
else:
__lowerCAmelCase : Union[str, Any] = val[
:dim
]
__lowerCAmelCase : Any = val[
dim : dim * 2
]
__lowerCAmelCase : int = val[
-dim:
]
else:
if "weight" in key:
__lowerCAmelCase : Optional[Any] = val[
:dim, :
]
__lowerCAmelCase : Optional[Any] = val[
dim : dim * 2, :
]
__lowerCAmelCase : List[str] = val[
-dim:, :
]
else:
__lowerCAmelCase : Optional[int] = val[:dim]
__lowerCAmelCase : List[str] = val[
dim : dim * 2
]
__lowerCAmelCase : Optional[int] = val[-dim:]
elif key.startswith("mit" ):
__lowerCAmelCase : Tuple = key_split[2]
__lowerCAmelCase : int = config.vision_config.mit_hidden_size
if "weight" in key:
__lowerCAmelCase : int = val[:dim, :]
__lowerCAmelCase : int = val[dim : dim * 2, :]
__lowerCAmelCase : Dict = val[-dim:, :]
else:
__lowerCAmelCase : Dict = val[:dim]
__lowerCAmelCase : List[Any] = val[dim : dim * 2]
__lowerCAmelCase : Dict = val[-dim:]
else:
__lowerCAmelCase : Union[str, Any] = key_split[2]
__lowerCAmelCase : Dict = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase : List[Any] = val[:dim, :]
__lowerCAmelCase : List[str] = val[
dim : dim * 2, :
]
__lowerCAmelCase : Tuple = val[-dim:, :]
else:
__lowerCAmelCase : str = val[:dim]
__lowerCAmelCase : Tuple = val[
dim : dim * 2
]
__lowerCAmelCase : List[Any] = val[-dim:]
else:
__lowerCAmelCase : str = rename_key(__snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__lowerCAmelCase : Dict = val.T
__lowerCAmelCase : Dict = val
return orig_state_dict
def _lowercase ( __snake_case ) -> List[Any]:
if num_frames == 8:
__lowerCAmelCase : Any = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
__lowerCAmelCase : Optional[int] = "eating_spaghetti.npy"
elif num_frames == 32:
__lowerCAmelCase : List[str] = "eating_spaghetti_32_frames.npy"
__lowerCAmelCase : List[str] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" ,filename=__snake_case ,repo_type="dataset" ,)
__lowerCAmelCase : List[str] = np.load(__snake_case )
return list(__snake_case )
def _lowercase ( __snake_case ,__snake_case=None ,__snake_case=False ) -> Tuple:
__lowerCAmelCase : Dict = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
__lowerCAmelCase : List[Any] = model_to_url[model_name]
__lowerCAmelCase : Dict = 8
if "16-frames" in model_name:
__lowerCAmelCase : List[Any] = 16
elif "shot" in model_name:
__lowerCAmelCase : Tuple = 32
__lowerCAmelCase : List[str] = get_xclip_config(__snake_case ,__snake_case )
__lowerCAmelCase : int = XCLIPModel(__snake_case )
model.eval()
if "drive" in checkpoint_url:
__lowerCAmelCase : Optional[int] = "pytorch_model.bin"
gdown.cached_download(__snake_case ,__snake_case ,quiet=__snake_case )
__lowerCAmelCase : int = torch.load(__snake_case ,map_location="cpu" )["model"]
else:
__lowerCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(__snake_case )["model"]
__lowerCAmelCase : Optional[Any] = convert_state_dict(__snake_case ,__snake_case )
__lowerCAmelCase : List[str] = XCLIPModel(__snake_case )
__lowerCAmelCase , __lowerCAmelCase : Dict = model.load_state_dict(__snake_case ,strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__lowerCAmelCase : Union[str, Any] = 336 if model_name == "xclip-large-patch14-16-frames" else 224
__lowerCAmelCase : Dict = VideoMAEImageProcessor(size=__snake_case )
__lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
__lowerCAmelCase : Optional[Any] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
__lowerCAmelCase : List[str] = XCLIPProcessor(image_processor=__snake_case ,tokenizer=__snake_case )
__lowerCAmelCase : Union[str, Any] = prepare_video(__snake_case )
__lowerCAmelCase : Any = processor(
text=["playing sports", "eating spaghetti", "go shopping"] ,videos=__snake_case ,return_tensors="pt" ,padding=__snake_case )
print("Shape of pixel values:" ,inputs.pixel_values.shape )
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model(**__snake_case )
# Verify outputs
__lowerCAmelCase : List[Any] = outputs.logits_per_video
__lowerCAmelCase : List[Any] = logits_per_video.softmax(dim=1 )
print("Probs:" ,__snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__lowerCAmelCase : Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__lowerCAmelCase : str = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
__lowerCAmelCase : Optional[Any] = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__lowerCAmelCase : Union[str, Any] = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
__lowerCAmelCase : List[str] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__lowerCAmelCase : Any = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__lowerCAmelCase : Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__lowerCAmelCase : Optional[Any] = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__lowerCAmelCase : Any = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__lowerCAmelCase : List[Any] = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__lowerCAmelCase : Union[str, Any] = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__lowerCAmelCase : str = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__lowerCAmelCase : Tuple = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__lowerCAmelCase : Optional[Any] = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__lowerCAmelCase : Any = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__lowerCAmelCase : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__lowerCAmelCase : Dict = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__lowerCAmelCase : int = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(__snake_case ,__snake_case ,atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(__snake_case ,organization="nielsr" )
processor.push_to_hub(__snake_case ,organization="nielsr" )
slow_tokenizer.push_to_hub(__snake_case ,organization="nielsr" )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__snake_case : int = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 269 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small")
__lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="np").input_ids
__lowerCAmelCase : Dict = tokenizer("Hi I am" , return_tensors="np").input_ids
__lowerCAmelCase : str = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id)
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE).logits
__lowerCAmelCase : int = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1])).mean()
__lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item())
__lowerCAmelCase : str = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
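    # Jaro similarity is (m/|s1| + m/|s2| + (m - t)/m) / 3 for m matching characters
    # and t transpositions; Winkler adds a bonus for a common prefix of up to 4 chars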
def get_matched_characters(__snake_case ,__snake_case ) -> str:
__lowerCAmelCase : Optional[Any] = []
__lowerCAmelCase : int = min(len(_stra ) ,len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__lowerCAmelCase : Optional[int] = int(max(0 ,i - limit ) )
__lowerCAmelCase : Any = int(min(i + limit + 1 ,len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__snake_case )
__lowerCAmelCase : List[Any] = F"""{_stra[0:_stra.index(__snake_case )]} {_stra[_stra.index(__snake_case ) + 1:]}"""
return "".join(__snake_case )
# matching characters
__lowerCAmelCase : Dict = get_matched_characters(__snake_case ,__snake_case )
__lowerCAmelCase : Any = get_matched_characters(__snake_case ,__snake_case )
__lowerCAmelCase : Optional[int] = len(__snake_case )
# transposition
__lowerCAmelCase : Tuple = (
len([(ca, ca) for ca, ca in zip(__snake_case ,__snake_case ) if ca != ca] ) // 2
)
if not match_count:
__lowerCAmelCase : List[Any] = 0.0
else:
__lowerCAmelCase : List[Any] = (
1
/ 3
* (
match_count / len(__snake_case )
+ match_count / len(__snake_case )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__lowerCAmelCase : Optional[int] = 0
for ca, ca in zip(stra[:4] ,stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world')) | 269 |
"""simple docstring"""
import re
def _lowercase ( __snake_case ) -> str:
if len(re.findall("[ATCG]" ,__snake_case ) ) != len(__snake_case ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" ,"TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 1 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__snake_case : int = {'UserAgent': UserAgent().random}
def _lowercase ( __snake_case ) -> dict:
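    # parse the JSON blob embedded in the page's <script> tag and return the
    # "user" section of the shared profile data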
__lowerCAmelCase : str = script.contents[0]
__lowerCAmelCase : Union[str, Any] = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A__ :
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
__lowerCAmelCase : Optional[Any] = self.get_json()
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE).text
__lowerCAmelCase : Any = BeautifulSoup(_SCREAMING_SNAKE_CASE , "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4])
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3])
def __repr__( self: List[Any]) -> str:
"""simple docstring"""
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self: Dict) -> str:
"""simple docstring"""
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self: int) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def _SCREAMING_SNAKE_CASE ( self: Dict) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def _lowercase ( __snake_case = "github" ) -> None:
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
__lowerCAmelCase : Optional[Any] = InstagramUser(__snake_case )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,__snake_case )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Optional[int] = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""") | 269 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FunnelTokenizer
SCREAMING_SNAKE_CASE = FunnelTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : str = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
__lowerCAmelCase : str = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file)
__lowerCAmelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE) , [7, 4, 5, 10, 8, 9])
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
__lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running")
__lowerCAmelCase : Optional[int] = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
__lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ) -> int:
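    # subset-sum DP: dp[i][j] is True when some subset of the first i numbers sums to j;
    # the minimum difference is s - 2 * j for the largest reachable j <= s // 2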
__lowerCAmelCase : List[str] = len(__snake_case )
__lowerCAmelCase : Dict = sum(__snake_case )
__lowerCAmelCase : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
__lowerCAmelCase : List[Any] = True
for i in range(1 ,s + 1 ):
__lowerCAmelCase : int = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
__lowerCAmelCase : int = dp[i][j - 1]
if arr[i - 1] <= j:
__lowerCAmelCase : str = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
__lowerCAmelCase : str = s - 2 * j
break
return diff | 269 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowercase ( __snake_case = "laptop" ) -> DataFrame:
__lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}"""
__lowerCAmelCase : Union[str, Any] = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text )
# Initialize a Pandas dataframe with the column titles
__lowerCAmelCase : Dict = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
    # Loop through each result entry and store it in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,):
try:
__lowerCAmelCase : Any = item.ha.text
__lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"]
__lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text
try:
__lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text
except AttributeError:
__lowerCAmelCase : Optional[Any] = "Not available"
try:
__lowerCAmelCase : Union[str, Any] = (
"₹"
+ item.find(
"span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
__lowerCAmelCase : Dict = ""
try:
__lowerCAmelCase : str = float(
(
(
float(product_mrp.strip("₹" ).replace("," ,"" ) )
- float(product_price.strip("₹" ).replace("," ,"" ) )
)
/ float(product_mrp.strip("₹" ).replace("," ,"" ) )
)
* 100 )
except ValueError:
__lowerCAmelCase : List[str] = float("nan" )
except AttributeError:
pass
__lowerCAmelCase : int = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase : Union[str, Any] = " "
__lowerCAmelCase : Union[str, Any] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__snake_case : Any = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""") | 269 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because it should only be run when releasing a minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
__lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE) | 269 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because it should only be run when releasing a minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
__lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float:
return round(float(moles / volume ) * nfactor )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float:
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float:
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float:
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 |
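# Three of the four helpers above (their original names are lost to the
# obfuscation) are the standard rearrangements of the ideal gas law PV = nRT
# with R ~ 0.0821 L*atm/(mol*K); the first multiplies a concentration by an
# n-factor. A readable sketch of the gas-law helpers with my own names:
GAS_CONSTANT = 0.0821  # L*atm/(mol*K)


def pressure_of_gas_system(moles, kelvin, volume):
    return round(moles * GAS_CONSTANT * kelvin / volume)  # P = nRT / V


def volume_of_gas_system(moles, kelvin, pressure):
    return round(moles * GAS_CONSTANT * kelvin / pressure)  # V = nRT / P


def temperature_of_gas_system(moles, volume, pressure):
    return round(pressure * volume / (GAS_CONSTANT * moles))  # T = PV / nR


# 2 mol at 300 K in 10 L: P = 2 * 0.0821 * 300 / 10 ~ 4.9 atm, rounded to 5
assert pressure_of_gas_system(2, 300, 10) == 5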
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__snake_case : Optional[int] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 | 1 |
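# The block above wires up deferred imports through `_LazyModule`, so the heavy
# submodules are only imported when one of their names is first accessed. A
# tiny self-contained sketch of the same lazy-attribute idea (my own
# illustration of the pattern, not the actual `_LazyModule` API):
import importlib


class TinyLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._attr_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value


lazy = TinyLazyModule({"sqrt": "math"})
print(lazy.sqrt(9.0))  # `math` is imported only at this point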
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'BlipImageProcessor'
SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = False
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.image_processor
def __call__( self: Dict , _SCREAMING_SNAKE_CASE: ImageInput = None , _SCREAMING_SNAKE_CASE: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Union[bool, str, PaddingStrategy] = False , _SCREAMING_SNAKE_CASE: Union[bool, str, TruncationStrategy] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: int = 0 , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , **_SCREAMING_SNAKE_CASE: Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text.")
# Get only text
if images is None:
__lowerCAmelCase : Tuple = self.tokenizer
__lowerCAmelCase : str = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
return text_encoding
# add pixel_values
__lowerCAmelCase : List[str] = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE)
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(_SCREAMING_SNAKE_CASE)
return encoding_image_processor
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: List[Any]) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
@property
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
__lowerCAmelCase : Dict = self.tokenizer.model_input_names
__lowerCAmelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | 269 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'maskformer-swin'
SCREAMING_SNAKE_CASE = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: int=224 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: int=3 , _SCREAMING_SNAKE_CASE: List[Any]=96 , _SCREAMING_SNAKE_CASE: Union[str, Any]=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE: Any=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE: List[str]=7 , _SCREAMING_SNAKE_CASE: List[str]=4.0 , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: str="gelu" , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE: str=1e-5 , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: str=None , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = image_size
__lowerCAmelCase : Any = patch_size
__lowerCAmelCase : Tuple = num_channels
__lowerCAmelCase : Any = embed_dim
__lowerCAmelCase : Any = depths
__lowerCAmelCase : Dict = len(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = num_heads
__lowerCAmelCase : Tuple = window_size
__lowerCAmelCase : Dict = mlp_ratio
__lowerCAmelCase : Any = qkv_bias
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : int = attention_probs_dropout_prob
__lowerCAmelCase : Tuple = drop_path_rate
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Optional[int] = use_absolute_embeddings
__lowerCAmelCase : List[str] = layer_norm_eps
__lowerCAmelCase : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCAmelCase : Optional[Any] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE) - 1))
__lowerCAmelCase : Any = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(_SCREAMING_SNAKE_CASE) + 1)]
__lowerCAmelCase , __lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names) | 269 | 1 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> int:
__lowerCAmelCase : Any = HfArgumentParser(__snake_case )
__lowerCAmelCase : Optional[int] = parser.parse_args_into_dataclasses()[0]
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(args=__snake_case )
try:
__lowerCAmelCase : str = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowerCAmelCase : Dict = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__lowerCAmelCase : Tuple = " ".join(str(__snake_case ).split(" " )[:-1] )
__lowerCAmelCase : Optional[Any] = ""
__lowerCAmelCase : Any = eval(str(__snake_case ).split(" " )[-1] )
__lowerCAmelCase : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__snake_case )
if len(__snake_case ) > 0:
__lowerCAmelCase : int = full_error_msg + begin_error_msg + str(__snake_case )
raise ValueError(__snake_case )
benchmark.run()
if __name__ == "__main__":
main() | 269 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class A__ :
'''simple docstring'''
def __init__( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = psutil.Process()
__lowerCAmelCase : str = False
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = -1
while True:
__lowerCAmelCase : str = max(self.process.memory_info().rss , self.cpu_memory_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : str = threading.Thread(target=self.peak_monitor)
__lowerCAmelCase : Tuple = True
self.thread.start()
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = False
self.thread.join()
return self.cpu_memory_peak
__snake_case : Tuple = PeakCPUMemory()
def _lowercase ( ) -> str:
# Time
__lowerCAmelCase : str = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : Optional[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = torch.cuda.memory_allocated(__snake_case )
torch.cuda.reset_peak_memory_stats()
return measures
def _lowercase ( __snake_case ) -> Optional[Any]:
# Time
__lowerCAmelCase : str = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : str = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCAmelCase : List[str] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = (torch.cuda.memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
__lowerCAmelCase : Any = (torch.cuda.max_memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 1 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : Union[str, Any] = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Optional[int] = use_input_mask
__lowerCAmelCase : Dict = use_token_type_ids
__lowerCAmelCase : Dict = use_labels
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = type_vocab_size
__lowerCAmelCase : Optional[int] = type_sequence_label_size
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Tuple = num_labels
__lowerCAmelCase : Optional[Any] = num_choices
__lowerCAmelCase : Union[str, Any] = scope
__lowerCAmelCase : Optional[Any] = q_groups
__lowerCAmelCase : Optional[int] = k_groups
__lowerCAmelCase : Any = v_groups
__lowerCAmelCase : int = post_attention_groups
__lowerCAmelCase : List[str] = intermediate_groups
__lowerCAmelCase : Optional[Any] = output_groups
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_choices
__lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : str = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
__lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = SqueezeBertModelTester(self)
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
__lowerCAmelCase : List[Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : Any = torch.Size((1, 3))
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = torch.tensor([[0.6401, -0.0349, -0.6041]])
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4)) | 269 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'mctct'
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str=8065 , _SCREAMING_SNAKE_CASE: str=1536 , _SCREAMING_SNAKE_CASE: str=36 , _SCREAMING_SNAKE_CASE: Optional[Any]=6144 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=384 , _SCREAMING_SNAKE_CASE: Optional[Any]=920 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-5 , _SCREAMING_SNAKE_CASE: List[Any]=0.3 , _SCREAMING_SNAKE_CASE: Optional[Any]="relu" , _SCREAMING_SNAKE_CASE: Optional[int]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.3 , _SCREAMING_SNAKE_CASE: Dict=0.3 , _SCREAMING_SNAKE_CASE: List[Any]=1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Tuple=0.3 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: int=(7,) , _SCREAMING_SNAKE_CASE: str=(3,) , _SCREAMING_SNAKE_CASE: Union[str, Any]=80 , _SCREAMING_SNAKE_CASE: Tuple=1 , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Tuple="sum" , _SCREAMING_SNAKE_CASE: List[str]=False , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : str = hidden_size
__lowerCAmelCase : str = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Dict = attention_head_dim
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : str = layer_norm_eps
__lowerCAmelCase : Tuple = layerdrop
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : List[Any] = initializer_range
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : str = pad_token_id
__lowerCAmelCase : Optional[int] = bos_token_id
__lowerCAmelCase : Union[str, Any] = eos_token_id
__lowerCAmelCase : Any = conv_glu_dim
__lowerCAmelCase : Optional[int] = conv_dropout
__lowerCAmelCase : Union[str, Any] = num_conv_layers
__lowerCAmelCase : Optional[int] = input_feat_per_channel
__lowerCAmelCase : Union[str, Any] = input_channels
__lowerCAmelCase : Optional[Any] = conv_channels
__lowerCAmelCase : Dict = ctc_loss_reduction
__lowerCAmelCase : int = ctc_zero_infinity
# prevents config testing fail with exporting to json
__lowerCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""") | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ) -> bool:
__lowerCAmelCase : List[Any] = 0
for ch in input_str:
__lowerCAmelCase : Dict = ord(__snake_case )
__lowerCAmelCase : Union[str, Any] = pow(2 ,__snake_case )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 |
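# The helper above answers "are all characters in the string unique?" by
# OR-ing one bit per Unicode code point into an integer bitmap. A readable
# sketch of the same trick (names are mine):
def all_chars_unique(text):
    bitmap = 0
    for ch in text:
        bit = 1 << ord(ch)   # one bit per code point
        if bitmap & bit:     # bit already set -> repeated character
            return False
        bitmap |= bit
    return True


assert all_chars_unique("abcdef") is True
assert all_chars_unique("aA") is True       # case-sensitive: 'a' and 'A' differ
assert all_chars_unique("hello") is False   # 'l' repeats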
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
__snake_case : Optional[Any] = [8, 5, 9, 7]
__snake_case : List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__snake_case : Optional[int] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class A__ :
'''simple docstring'''
def __init__( self: Any , _SCREAMING_SNAKE_CASE: list[int] , _SCREAMING_SNAKE_CASE: list[list[int]] , _SCREAMING_SNAKE_CASE: list[list[int]] , ) -> None:
"""simple docstring"""
__lowerCAmelCase : Any = claim_vector
__lowerCAmelCase : Tuple = allocated_resources_table
__lowerCAmelCase : Tuple = maximum_claim_table
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table)
for i in range(len(self.__allocated_resources_table[0]))
]
def _SCREAMING_SNAKE_CASE ( self: int) -> list[int]:
"""simple docstring"""
return np.array(self.__claim_vector) - np.array(
self.__processes_resource_summation())
def _SCREAMING_SNAKE_CASE ( self: int) -> list[list[int]]:
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i]) - np.array(_SCREAMING_SNAKE_CASE))
for i, allocated_resource in enumerate(self.__allocated_resources_table)
]
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> dict[int, list[int]]:
"""simple docstring"""
return {self.__need().index(_SCREAMING_SNAKE_CASE): i for i in self.__need()}
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> None:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.__need()
__lowerCAmelCase : int = self.__allocated_resources_table
__lowerCAmelCase : Dict = self.__available_resources()
__lowerCAmelCase : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n")
while need_list:
__lowerCAmelCase : int = False
for each_need in need_list:
__lowerCAmelCase : Dict = True
for index, need in enumerate(_SCREAMING_SNAKE_CASE):
if need > available_resources[index]:
__lowerCAmelCase : Dict = False
break
if execution:
__lowerCAmelCase : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowerCAmelCase : Union[str, Any] = original_need_index
print(F"""Process {process_number + 1} is executing.""")
# remove the process run from stack
need_list.remove(_SCREAMING_SNAKE_CASE)
# update available/freed resources stack
__lowerCAmelCase : Dict = np.array(_SCREAMING_SNAKE_CASE) + np.array(
alloc_resources_table[process_number])
print(
"Updated available resource stack for processes: "
+ " ".join([str(_SCREAMING_SNAKE_CASE) for x in available_resources]))
break
if safe:
print("The process is in a safe state.\n")
else:
print("System in unsafe state. Aborting...\n")
break
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
print(" " * 9 + "Allocated Resource Table")
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(_SCREAMING_SNAKE_CASE) + 1}"""
+ " ".join(F"""{it:>8}""" for it in item)
+ "\n")
print(" " * 9 + "System Resource Table")
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(_SCREAMING_SNAKE_CASE) + 1}"""
+ " ".join(F"""{it:>8}""" for it in item)
+ "\n")
print(
"Current Usage by Active Processes: "
+ " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__claim_vector))
print(
"Initial Available Resources: "
+ " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__available_resources()))
time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 1 |
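# The class above is the Banker's algorithm safety check: compute each
# process's remaining need (maximum claim minus current allocation), then
# repeatedly pick a process whose need fits into the available vector, let it
# finish, and reclaim its allocation. A compact sketch of that core loop
# (names are mine; the class above adds pretty-printing around the same idea):
def is_safe_state(available, allocated, maximum):
    need = [
        [m - a for m, a in zip(max_row, alloc_row)]
        for max_row, alloc_row in zip(maximum, allocated)
    ]
    work = list(available)
    finished = [False] * len(allocated)
    progress = True
    while progress:
        progress = False
        for i, row in enumerate(need):
            if not finished[i] and all(n <= w for n, w in zip(row, work)):
                work = [w + a for w, a in zip(work, allocated[i])]  # release resources
                finished[i] = True
                progress = True
    return all(finished)


# toy example: a single process that still needs [1, 1] with [2, 2] available
assert is_safe_state([2, 2], [[0, 0]], [[1, 1]]) is True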
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _lowercase ( __snake_case ) -> Tuple:
__lowerCAmelCase : List[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__snake_case ,__snake_case )
def _lowercase ( __snake_case ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = emb.weight.shape
__lowerCAmelCase : Optional[Any] = nn.Linear(__snake_case ,__snake_case ,bias=__snake_case )
__lowerCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def _lowercase ( __snake_case ,__snake_case="facebook/mbart-large-en-ro" ,__snake_case=False ,__snake_case=False ) -> Dict:
__lowerCAmelCase : int = torch.load(__snake_case ,map_location="cpu" )["model"]
remove_ignore_keys_(__snake_case )
__lowerCAmelCase : Dict = state_dict["encoder.embed_tokens.weight"].shape[0]
__lowerCAmelCase : str = MBartConfig.from_pretrained(__snake_case ,vocab_size=__snake_case )
if mbart_aa and finetuned:
__lowerCAmelCase : Optional[Any] = "relu"
__lowerCAmelCase : List[str] = state_dict["decoder.embed_tokens.weight"]
__lowerCAmelCase : Optional[int] = MBartForConditionalGeneration(__snake_case )
model.model.load_state_dict(__snake_case )
if finetuned:
__lowerCAmelCase : Optional[int] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
__snake_case : int = parser.parse_args()
__snake_case : Union[str, Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 269 |
"""simple docstring"""
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowercase ( *__snake_case ) -> Optional[Any]:
with open(__snake_case ,"r" ) as fh:
fcntl.flock(__snake_case ,fcntl.LOCK_EX )
try:
print(*__snake_case )
finally:
fcntl.flock(__snake_case ,fcntl.LOCK_UN )
__snake_case : List[Any] = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__snake_case : List[str] = torch.device('cuda', local_rank)
__snake_case : Optional[Any] = socket.gethostname()
__snake_case : str = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case : Tuple = dist.get_rank()
__snake_case : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise | 269 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Dict = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__snake_case : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__snake_case : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__snake_case : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _lowercase ( __snake_case ,__snake_case ) -> Union[str, Any]:
return float((preds == labels).mean() )
def _lowercase ( __snake_case ,__snake_case ) -> str:
__lowerCAmelCase : str = simple_accuracy(__snake_case ,__snake_case )
__lowerCAmelCase : Any = float(fa_score(y_true=__snake_case ,y_pred=__snake_case ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : Union[str, Any] = np.array(__snake_case )
__lowerCAmelCase : Tuple = np.array(__snake_case )
__lowerCAmelCase : List[Any] = en_sentvecs.shape[0]
# mean centering
__lowerCAmelCase : Union[str, Any] = en_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : int = in_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : Optional[Any] = cdist(__snake_case ,__snake_case ,"cosine" )
__lowerCAmelCase : int = np.array(range(__snake_case ) )
__lowerCAmelCase : int = sim.argsort(axis=1 )[:, :10]
__lowerCAmelCase : Optional[Any] = np.any(preds == actual[:, None] ,axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
"""simple docstring"""
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]") | 269 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
class A__ :
'''simple docstring'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any) -> None:
"""simple docstring"""
__lowerCAmelCase : Any = data
__lowerCAmelCase : Node | None = None
def __iter__( self: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self
__lowerCAmelCase : Any = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_SCREAMING_SNAKE_CASE)
yield node.data
__lowerCAmelCase : str = node.next_node
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> bool:
"""simple docstring"""
try:
list(self)
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__snake_case : Union[str, Any] = Node(1)
__snake_case : List[str] = Node(2)
__snake_case : Union[str, Any] = Node(3)
__snake_case : str = Node(4)
print(root_node.has_loop) # False
__snake_case : List[Any] = root_node.next_node
print(root_node.has_loop) # True
__snake_case : int = Node(5)
__snake_case : Any = Node(6)
__snake_case : Any = Node(5)
__snake_case : Any = Node(6)
print(root_node.has_loop) # False
__snake_case : Tuple = Node(1)
print(root_node.has_loop) # False | 269 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
if digit_amount > 0:
return round(number - int(__snake_case ) ,__snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 269 | 1 |
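# The helper above strips the integer part of a number and optionally rounds
# the remaining fraction. A readable sketch (names are mine):
def isolate_decimal(number, digits):
    fractional = number - int(number)
    return round(fractional, digits) if digits > 0 else fractional


assert isolate_decimal(2.5, 0) == 0.5
assert isolate_decimal(1.53, 1) == 0.5
assert isolate_decimal(-14.789, 3) == -0.789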
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__snake_case : List[str] = logging.getLogger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=-1) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = label_idx
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Union[Split, str]) -> List[InputExample]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = mode.value
__lowerCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , F"""{mode}.txt""")
__lowerCAmelCase : str = 1
__lowerCAmelCase : Optional[int] = []
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as f:
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Optional[Any] = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE))
guid_index += 1
__lowerCAmelCase : Tuple = []
__lowerCAmelCase : str = []
else:
__lowerCAmelCase : Optional[Any] = line.split(" ")
words.append(splits[0])
if len(_SCREAMING_SNAKE_CASE) > 1:
labels.append(splits[self.label_idx].replace("\n" , ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE))
return examples
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: TextIO , _SCREAMING_SNAKE_CASE: TextIO , _SCREAMING_SNAKE_CASE: List) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(_SCREAMING_SNAKE_CASE)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__lowerCAmelCase : List[str] = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
writer.write(_SCREAMING_SNAKE_CASE)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0])
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: str) -> List[str]:
"""simple docstring"""
if path:
with open(_SCREAMING_SNAKE_CASE , "r") as f:
__lowerCAmelCase : int = f.read().splitlines()
if "O" not in labels:
__lowerCAmelCase : str = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Union[str, Any]) -> str:
"""simple docstring"""
super().__init__(label_idx=-2)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> List[str]:
"""simple docstring"""
if path:
with open(_SCREAMING_SNAKE_CASE , "r") as f:
__lowerCAmelCase : str = f.read().splitlines()
if "O" not in labels:
__lowerCAmelCase : str = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[Split, str]) -> List[InputExample]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : str = mode.value
__lowerCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , F"""{mode}.txt""")
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : int = []
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as f:
for sentence in parse_incr(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : str = []
__lowerCAmelCase : Union[str, Any] = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(_SCREAMING_SNAKE_CASE) == len(_SCREAMING_SNAKE_CASE)
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE))
guid_index += 1
return examples
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: TextIO , _SCREAMING_SNAKE_CASE: TextIO , _SCREAMING_SNAKE_CASE: List) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = 0
for sentence in parse_incr(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Dict = preds_list[example_id]
__lowerCAmelCase : Tuple = ""
for token in sentence:
out += F"""{token['form']} ({token['upos']}|{s_p.pop(0)}) """
out += "\n"
writer.write(_SCREAMING_SNAKE_CASE)
example_id += 1
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str) -> List[str]:
"""simple docstring"""
if path:
with open(_SCREAMING_SNAKE_CASE , "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
] | 269 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'torchsde']
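# Placeholder class used when the optional torch/torchsde backends are not installed; instantiating it or calling its loaders raises an error via requires_backends.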
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"]) | 269 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__snake_case : Dict = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
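# Model classes are only added to the lazy import structure when the matching backend (PyTorch or Flax) is available; otherwise the corresponding entries are skipped.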
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 |
"""simple docstring"""
def _lowercase ( ) -> int:
return 1
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__snake_case )
def _lowercase ( __snake_case = 200 ) -> int:
return two_pound(__snake_case )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 269 | 1 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case : Dict = logging.getLogger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'masked_bert'
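# BERT-style hyper-parameters extended with pruning controls (pruning_method, mask_init, mask_scale) for the masked variant of the model.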
def __init__( self: Any , _SCREAMING_SNAKE_CASE: List[Any]=3_0522 , _SCREAMING_SNAKE_CASE: Any=768 , _SCREAMING_SNAKE_CASE: Dict=12 , _SCREAMING_SNAKE_CASE: Dict=12 , _SCREAMING_SNAKE_CASE: List[Any]=3072 , _SCREAMING_SNAKE_CASE: str="gelu" , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.1 , _SCREAMING_SNAKE_CASE: Dict=512 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: str=0.02 , _SCREAMING_SNAKE_CASE: Any=1e-12 , _SCREAMING_SNAKE_CASE: List[Any]=0 , _SCREAMING_SNAKE_CASE: str="topK" , _SCREAMING_SNAKE_CASE: Any="constant" , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : Optional[Any] = hidden_size
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : Optional[Any] = num_attention_heads
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : Dict = intermediate_size
__lowerCAmelCase : str = hidden_dropout_prob
__lowerCAmelCase : str = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : str = type_vocab_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : Dict = layer_norm_eps
__lowerCAmelCase : Tuple = pruning_method
__lowerCAmelCase : Optional[Any] = mask_init
__lowerCAmelCase : int = mask_scale | 269 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as input_file:
__lowerCAmelCase : List[str] = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
__lowerCAmelCase : List[Any] = input_file.read()
__lowerCAmelCase : Any = regexp.search(_SCREAMING_SNAKE_CASE)
return match
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str) -> Optional[Any]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as input_file:
__lowerCAmelCase : Any = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
__lowerCAmelCase : Optional[int] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__lowerCAmelCase : int = regexp.finditer(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = Path("./datasets")
__lowerCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_SCREAMING_SNAKE_CASE)):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Dict = Path("./datasets")
__lowerCAmelCase : Union[str, Any] = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(_SCREAMING_SNAKE_CASE)):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""") | 269 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: Optional[Any]=0.01 , _SCREAMING_SNAKE_CASE: List[Any]=1000) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = p_stop
__lowerCAmelCase : Tuple = max_length
def __iter__( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : Optional[int] = False
while not stop and count < self.max_length:
yield count
count += 1
__lowerCAmelCase : Tuple = random.random() < self.p_stop
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: List[str]=True) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = [
BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
for i in range(2)
]
__lowerCAmelCase : List[Any] = [list(_SCREAMING_SNAKE_CASE) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_SCREAMING_SNAKE_CASE) for shard in batch_sampler_shards] , [len(_SCREAMING_SNAKE_CASE) for e in expected])
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = BatchSampler(range(24) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = BatchSampler(range(24) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCAmelCase : Dict = BatchSampler(range(21) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = BatchSampler(range(21) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size but yields a number of
# batches that is a multiple of num_processes.
__lowerCAmelCase : Any = BatchSampler(range(22) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = BatchSampler(range(22) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size and does not yield a number
# of batches that is a multiple of num_processes.
__lowerCAmelCase : Optional[Any] = BatchSampler(range(20) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = BatchSampler(range(20) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is very small.
__lowerCAmelCase : str = BatchSampler(range(2) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = BatchSampler(range(2) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = BatchSampler(range(24) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = BatchSampler(range(24) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCAmelCase : Tuple = BatchSampler(range(22) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = BatchSampler(range(22) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCAmelCase : Union[str, Any] = BatchSampler(range(21) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = BatchSampler(range(21) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is very small.
__lowerCAmelCase : Optional[Any] = BatchSampler(range(2) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = BatchSampler(range(2) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = BatchSampler(range(24) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = BatchSampler(range(24) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCAmelCase : Optional[Any] = BatchSampler(range(21) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = BatchSampler(range(21) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size but yields a number of
# batches that is a multiple of num_processes.
__lowerCAmelCase : Dict = BatchSampler(range(22) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = BatchSampler(range(22) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size and does not yield a number
# of batches that is a multiple of num_processes.
__lowerCAmelCase : int = BatchSampler(range(20) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = BatchSampler(range(20) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is very small.
__lowerCAmelCase : Any = BatchSampler(range(2) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = BatchSampler(range(2) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = BatchSampler(range(24) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = BatchSampler(range(24) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
# Expected shouldn't change
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCAmelCase : Optional[Any] = BatchSampler(range(22) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = BatchSampler(range(22) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCAmelCase : Dict = BatchSampler(range(21) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = BatchSampler(range(21) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
# Check the shards when the dataset is very small.
__lowerCAmelCase : Dict = BatchSampler(range(2) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = BatchSampler(range(2) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = [[], []]
self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: str) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__lowerCAmelCase : List[Any] = [BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0]) , 3)
self.assertEqual(len(batch_sampler_shards[1]) , 2)
self.assertListEqual(list(batch_sampler_shards[0]) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
self.assertListEqual(list(batch_sampler_shards[1]) , [[3, 4], [9, 10, 11]])
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int=False , _SCREAMING_SNAKE_CASE: Optional[Any]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=False) -> str:
"""simple docstring"""
random.seed(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = [
IterableDatasetShard(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , drop_last=_SCREAMING_SNAKE_CASE , num_processes=_SCREAMING_SNAKE_CASE , process_index=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , )
for i in range(_SCREAMING_SNAKE_CASE)
]
__lowerCAmelCase : List[str] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_SCREAMING_SNAKE_CASE)
iterable_dataset_lists.append(list(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : List[str] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__lowerCAmelCase : Union[str, Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE))
self.assertTrue(len(_SCREAMING_SNAKE_CASE) % shard_batch_size == 0)
__lowerCAmelCase : str = []
for idx in range(0 , len(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_SCREAMING_SNAKE_CASE) < len(_SCREAMING_SNAKE_CASE):
reference += reference
self.assertListEqual(_SCREAMING_SNAKE_CASE , reference[: len(_SCREAMING_SNAKE_CASE)])
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = 42
__lowerCAmelCase : Tuple = RandomIterableDataset()
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
# Edge case with a very small dataset
__lowerCAmelCase : Dict = RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = BatchSampler(range(16) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = SkipBatchSampler(_SCREAMING_SNAKE_CASE , 2)
self.assertListEqual(list(_SCREAMING_SNAKE_CASE) , [[8, 9, 10, 11], [12, 13, 14, 15]])
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = SkipDataLoader(list(range(16)) , batch_size=4 , skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]])
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = DataLoader(list(range(16)) , batch_size=4)
__lowerCAmelCase : Dict = skip_first_batches(_SCREAMING_SNAKE_CASE , num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]])
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = DataLoaderShard(list(range(16)) , batch_size=4)
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[Any]:
"""simple docstring"""
Accelerator()
__lowerCAmelCase : Union[str, Any] = DataLoaderDispatcher(range(16) , batch_size=4)
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(_SCREAMING_SNAKE_CASE):
self.assertEqual(dataloader.end_of_dataloader , idx == 3) | 269 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
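# Hard-coded token id lists (kept under fmt: off); these appear to be the non-speech token ids that Whisper suppresses during generation, one list per tokenizer variant.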
# fmt: off
__snake_case : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__snake_case : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'whisper'
SCREAMING_SNAKE_CASE = ['past_key_values']
SCREAMING_SNAKE_CASE = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: Any=5_1865 , _SCREAMING_SNAKE_CASE: Optional[Any]=80 , _SCREAMING_SNAKE_CASE: Optional[int]=6 , _SCREAMING_SNAKE_CASE: Any=4 , _SCREAMING_SNAKE_CASE: Dict=6 , _SCREAMING_SNAKE_CASE: Dict=4 , _SCREAMING_SNAKE_CASE: Optional[Any]=1536 , _SCREAMING_SNAKE_CASE: List[str]=1536 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: str=0.0 , _SCREAMING_SNAKE_CASE: List[str]=5_0257 , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: Optional[int]="gelu" , _SCREAMING_SNAKE_CASE: Tuple=256 , _SCREAMING_SNAKE_CASE: str=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=1500 , _SCREAMING_SNAKE_CASE: str=448 , _SCREAMING_SNAKE_CASE: Any=5_0256 , _SCREAMING_SNAKE_CASE: Any=5_0256 , _SCREAMING_SNAKE_CASE: List[str]=5_0256 , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: List[Any]=[220, 5_0256] , _SCREAMING_SNAKE_CASE: Dict=False , _SCREAMING_SNAKE_CASE: str=256 , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: Tuple=0.05 , _SCREAMING_SNAKE_CASE: List[str]=10 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: Optional[int]=10 , _SCREAMING_SNAKE_CASE: int=0 , _SCREAMING_SNAKE_CASE: Any=7 , **_SCREAMING_SNAKE_CASE: List[str] , ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : Optional[Any] = num_mel_bins
__lowerCAmelCase : int = d_model
__lowerCAmelCase : List[Any] = encoder_layers
__lowerCAmelCase : List[Any] = encoder_attention_heads
__lowerCAmelCase : List[str] = decoder_layers
__lowerCAmelCase : Tuple = decoder_attention_heads
__lowerCAmelCase : Any = decoder_ffn_dim
__lowerCAmelCase : Tuple = encoder_ffn_dim
__lowerCAmelCase : List[str] = dropout
__lowerCAmelCase : Union[str, Any] = attention_dropout
__lowerCAmelCase : Union[str, Any] = activation_dropout
__lowerCAmelCase : Dict = activation_function
__lowerCAmelCase : Tuple = init_std
__lowerCAmelCase : str = encoder_layerdrop
__lowerCAmelCase : int = decoder_layerdrop
__lowerCAmelCase : Optional[int] = use_cache
__lowerCAmelCase : Union[str, Any] = encoder_layers
__lowerCAmelCase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase : int = max_source_positions
__lowerCAmelCase : Any = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase : Dict = classifier_proj_size
__lowerCAmelCase : Dict = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase : int = apply_spec_augment
__lowerCAmelCase : Union[str, Any] = mask_time_prob
__lowerCAmelCase : str = mask_time_length
__lowerCAmelCase : int = mask_time_min_masks
__lowerCAmelCase : List[Any] = mask_feature_prob
__lowerCAmelCase : Tuple = mask_feature_length
__lowerCAmelCase : Any = mask_feature_min_masks
__lowerCAmelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , suppress_tokens=_SCREAMING_SNAKE_CASE , begin_suppress_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowerCAmelCase : List[str] = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
])
if self.use_past:
__lowerCAmelCase : Tuple = {0: "batch"}
else:
__lowerCAmelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction="inputs")
return common_inputs
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional["TensorType"] = None , _SCREAMING_SNAKE_CASE: int = 2_2050 , _SCREAMING_SNAKE_CASE: float = 5.0 , _SCREAMING_SNAKE_CASE: int = 220 , ) -> Mapping[str, Any]:
"""simple docstring"""
__lowerCAmelCase : int = OrderedDict()
__lowerCAmelCase : Optional[Any] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , time_duration=_SCREAMING_SNAKE_CASE , frequency=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = encoder_inputs["input_features"].shape[2]
__lowerCAmelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCAmelCase : List[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = encoder_inputs.pop("input_features")
__lowerCAmelCase : List[Any] = decoder_inputs.pop("decoder_input_ids")
if "past_key_values" in decoder_inputs:
__lowerCAmelCase : int = decoder_inputs.pop("past_key_values")
return dummy_inputs
@property
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> float:
"""simple docstring"""
return 1e-3 | 269 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__snake_case : Any = random.Random()
def _lowercase ( __snake_case ,__snake_case=1.0 ,__snake_case=None ,__snake_case=None ) -> int:
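# Produce a nested list of random floats with the requested shape and scale; used as stand-in audio throughout these feature-extractor tests.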
if rng is None:
__lowerCAmelCase : Union[str, Any] = global_rng
__lowerCAmelCase : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: Any , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str=7 , _SCREAMING_SNAKE_CASE: Optional[Any]=400 , _SCREAMING_SNAKE_CASE: Dict=2000 , _SCREAMING_SNAKE_CASE: Any=1 , _SCREAMING_SNAKE_CASE: Dict=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=1_6000 , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Optional[int]=80 , _SCREAMING_SNAKE_CASE: Optional[int]=16 , _SCREAMING_SNAKE_CASE: Dict=64 , _SCREAMING_SNAKE_CASE: Optional[int]="hann_window" , _SCREAMING_SNAKE_CASE: Optional[Any]=80 , _SCREAMING_SNAKE_CASE: List[Any]=7600 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-10 , _SCREAMING_SNAKE_CASE: Optional[int]=True , ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Any = parent
__lowerCAmelCase : List[str] = batch_size
__lowerCAmelCase : List[str] = min_seq_length
__lowerCAmelCase : int = max_seq_length
__lowerCAmelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : List[str] = feature_size
__lowerCAmelCase : List[Any] = padding_value
__lowerCAmelCase : Optional[int] = sampling_rate
__lowerCAmelCase : Optional[int] = do_normalize
__lowerCAmelCase : Dict = num_mel_bins
__lowerCAmelCase : Optional[int] = hop_length
__lowerCAmelCase : Union[str, Any] = win_length
__lowerCAmelCase : Dict = win_function
__lowerCAmelCase : str = fmin
__lowerCAmelCase : Tuple = fmax
__lowerCAmelCase : Union[str, Any] = mel_floor
__lowerCAmelCase : int = return_attention_mask
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: List[str]=False) -> List[Any]:
"""simple docstring"""
def _flatten(_SCREAMING_SNAKE_CASE: Tuple):
return list(itertools.chain(*_SCREAMING_SNAKE_CASE))
if equal_length:
__lowerCAmelCase : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
__lowerCAmelCase : List[str] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
__lowerCAmelCase : Union[str, Any] = [np.asarray(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
return speech_inputs
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: Optional[Any]=False) -> Any:
"""simple docstring"""
if equal_length:
__lowerCAmelCase : Tuple = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
__lowerCAmelCase : List[Any] = [
floats_list((x, self.num_mel_bins))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
__lowerCAmelCase : str = [np.asarray(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
return speech_inputs
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = SpeechTaFeatureExtractor
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = SpeechTaFeatureExtractionTester(self)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: List[str]) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE , axis=0) - 1) < 1e-3))
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Any = [np.asarray(_SCREAMING_SNAKE_CASE) for speech_input in speech_inputs]
# Test not batched input
__lowerCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors="np").input_values
__lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test batched
__lowerCAmelCase : List[str] = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
__lowerCAmelCase : Optional[int] = feat_extract(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Any = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : str = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase : Optional[int] = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Any = feat_extract(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800])
self.assertTrue(input_values[0][800:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:1000])
self.assertTrue(input_values[0][1000:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:1200])
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Optional[int] = range(800 , 1400 , 200)
__lowerCAmelCase : Dict = [floats_list((1, x))[0] for x in lengths]
__lowerCAmelCase : Any = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase : Dict = [None, 1600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = feat_extract(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800])
self._check_zero_mean_unit_variance(input_values[1][:1000])
self._check_zero_mean_unit_variance(input_values[2][:1200])
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Optional[Any] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : str = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="max_length" , return_tensors="np")
__lowerCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : List[Any] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : List[Any] = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=1000 , padding="longest" , return_tensors="np")
__lowerCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800])
self._check_zero_mean_unit_variance(input_values[1, :1000])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000))
__lowerCAmelCase : Dict = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Optional[int] = feat_extract(
_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=2000 , padding="longest" , return_tensors="np")
__lowerCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800])
self._check_zero_mean_unit_variance(input_values[1, :1000])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200))
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Optional[int] = np.random.rand(100).astype(np.floataa)
__lowerCAmelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Dict = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np")
self.assertTrue(np_processed.input_values.dtype == np.floataa)
__lowerCAmelCase : Any = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt")
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Union[str, Any] = [np.asarray(_SCREAMING_SNAKE_CASE) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : List[str] = feature_extractor(audio_target=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
self.assertTrue(input_values.ndim == 3)
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
# Test not batched input
__lowerCAmelCase : Any = feature_extractor(speech_inputs[0] , return_tensors="np").input_values
__lowerCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test batched
__lowerCAmelCase : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
__lowerCAmelCase : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
__lowerCAmelCase : Optional[int] = np.asarray(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
__lowerCAmelCase : Any = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
def _SCREAMING_SNAKE_CASE ( self: Any) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase : str = feat_extract.model_input_names[0]
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(_SCREAMING_SNAKE_CASE) == len(_SCREAMING_SNAKE_CASE) for x, y in zip(_SCREAMING_SNAKE_CASE , processed_features[input_name])))
__lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase : List[str] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase : int = feat_extract.model_input_names[0]
__lowerCAmelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase : Any = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase : Optional[int] = feat_extract.num_mel_bins # hack!
__lowerCAmelCase : Tuple = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase : List[str] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
def _SCREAMING_SNAKE_CASE ( self: str) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = self.feat_extract_dict
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Any = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : Dict = [len(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
__lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
__lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase : Union[str, Any] = feat_extract.num_mel_bins # hack!
__lowerCAmelCase : Optional[Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , _SCREAMING_SNAKE_CASE)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.feat_extract_dict
__lowerCAmelCase : Any = True
__lowerCAmelCase : Optional[int] = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCAmelCase : str = [len(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
__lowerCAmelCase : str = feat_extract.model_input_names[0]
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase : List[str] = min(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = feat_extract.num_mel_bins # hack!
__lowerCAmelCase : List[Any] = feat_extract.pad(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np")
self.assertIn("attention_mask" , _SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
__lowerCAmelCase : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
__lowerCAmelCase : Optional[Any] = ds.sort("id").select(range(_SCREAMING_SNAKE_CASE))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
# fmt: on
__lowerCAmelCase : str = self._load_datasamples(1)
__lowerCAmelCase : Optional[int] = SpeechTaFeatureExtractor()
__lowerCAmelCase : str = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="pt").input_values
self.assertEqual(input_values.shape , (1, 9_3680))
self.assertTrue(torch.allclose(input_values[0, :30] , _SCREAMING_SNAKE_CASE , atol=1e-6))
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
# fmt: off
__lowerCAmelCase : Union[str, Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
# fmt: on
__lowerCAmelCase : int = self._load_datasamples(1)
__lowerCAmelCase : str = SpeechTaFeatureExtractor()
__lowerCAmelCase : List[str] = feature_extractor(audio_target=_SCREAMING_SNAKE_CASE , return_tensors="pt").input_values
self.assertEqual(input_values.shape , (1, 366, 80))
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1e-4)) | 269 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__snake_case : Optional[int] = 50_000
__snake_case : Dict = 5_000
__snake_case , __snake_case : Union[str, Any] = os.path.split(__file__)
__snake_case : Any = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
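# The four timed helpers below measure, in order: plain row-by-row reads, sliced batch
# reads, row reads under a fixed output format (numpy/pandas/torch/tensorflow), and
# batched reads under a format. @get_duration is assumed to wrap each call and return
# its elapsed wall-clock time.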
@get_duration
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
for i in range(__snake_case ):
__lowerCAmelCase : Union[str, Any] = dataset[i]
@get_duration
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Dict:
for i in range(0 ,len(__snake_case ) ,__snake_case ):
__lowerCAmelCase : List[str] = dataset[i : i + batch_size]
@get_duration
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Dict:
with dataset.formatted_as(type=__snake_case ):
for i in range(__snake_case ):
__lowerCAmelCase : Union[str, Any] = dataset[i]
@get_duration
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
with dataset.formatted_as(type=__snake_case ):
for i in range(0 ,__snake_case ,__snake_case ):
__lowerCAmelCase : Optional[int] = dataset[i : i + batch_size]
def _lowercase ( ) -> Union[str, Any]:
__lowerCAmelCase : Optional[int] = {"num examples": SPEED_TEST_N_EXAMPLES}
__lowerCAmelCase : Optional[int] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
__lowerCAmelCase : Any = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
__lowerCAmelCase : int = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
__lowerCAmelCase : str = generate_example_dataset(
os.path.join(__snake_case ,"dataset.arrow" ) ,__snake_case ,num_examples=__snake_case ,seq_shapes={"list": (100,)} ,)
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ ,str(__snake_case ) )
__lowerCAmelCase : str = func(__snake_case ,**__snake_case )
print("shuffling dataset" )
__lowerCAmelCase : Optional[int] = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " ,func.__name__ ,str(__snake_case ) )
__lowerCAmelCase : List[str] = func(
__snake_case ,**__snake_case )
with open(__snake_case ,"wb" ) as f:
f.write(json.dumps(__snake_case ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating() | 269 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__snake_case : Union[str, Any] = '\\n\n'
__snake_case : List[str] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__snake_case : Union[str, Any] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
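# Rough sketch of the quantity the metric below computes (notation is illustrative only):
# for a text X of t tokens,
#   PPL(X) = exp( -(1/t) * sum_{i=1..t} log p(x_i | x_<i) )
# i.e. the exponentiated mean negative log-likelihood described in the docstring above,
# with padded positions excluded from the average via the attention mask.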
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string"),
}) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int = 16 , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Union[str, Any]=None) -> Optional[Any]:
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
__lowerCAmelCase : Any = "cuda"
else:
__lowerCAmelCase : str = "cuda" if torch.cuda.is_available() else "cpu"
__lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model.to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase : List[Any] = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(_SCREAMING_SNAKE_CASE) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase : Optional[Any] = model.config.max_length - 1
else:
__lowerCAmelCase : List[str] = model.config.max_length
__lowerCAmelCase : List[str] = tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors="pt" , return_attention_mask=_SCREAMING_SNAKE_CASE , ).to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = encodings["input_ids"]
__lowerCAmelCase : Union[str, Any] = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase : str = []
__lowerCAmelCase : List[str] = CrossEntropyLoss(reduction="none")
for start_index in logging.tqdm(range(0 , len(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)):
__lowerCAmelCase : Union[str, Any] = min(start_index + batch_size , len(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Optional[Any] = encoded_texts[start_index:end_index]
__lowerCAmelCase : Dict = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase : Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
__lowerCAmelCase : Tuple = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(_SCREAMING_SNAKE_CASE), attn_mask] , dim=1)
__lowerCAmelCase : List[Any] = encoded_batch
with torch.no_grad():
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE).logits
__lowerCAmelCase : List[str] = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase : int = labels[..., 1:].contiguous()
__lowerCAmelCase : str = attn_mask[..., 1:].contiguous()
__lowerCAmelCase : Union[str, Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2) , _SCREAMING_SNAKE_CASE) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1))
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_SCREAMING_SNAKE_CASE)} | 269 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : Union[str, Any] = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Optional[int] = use_input_mask
__lowerCAmelCase : Dict = use_token_type_ids
__lowerCAmelCase : Dict = use_labels
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = type_vocab_size
__lowerCAmelCase : Optional[int] = type_sequence_label_size
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Tuple = num_labels
__lowerCAmelCase : Optional[Any] = num_choices
__lowerCAmelCase : Union[str, Any] = scope
__lowerCAmelCase : Optional[Any] = q_groups
__lowerCAmelCase : Optional[int] = k_groups
__lowerCAmelCase : Any = v_groups
__lowerCAmelCase : int = post_attention_groups
__lowerCAmelCase : List[str] = intermediate_groups
__lowerCAmelCase : Optional[Any] = output_groups
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_choices
__lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
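# The flat (batch_size, seq_length) ids and attention mask are tiled below to
# (batch_size, num_choices, seq_length) so that every answer choice is scored against
# the same underlying inputs.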
__lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : str = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
__lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = SqueezeBertModelTester(self)
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
__lowerCAmelCase : List[Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : Any = torch.Size((1, 3))
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = torch.tensor([[0.6401, -0.0349, -0.6041]])
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4)) | 269 | 1 |
"""simple docstring"""
from functools import reduce
__snake_case : Any = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _lowercase ( __snake_case = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda __snake_case ,__snake_case : str(int(__snake_case ) * int(__snake_case ) ) ,n[i : i + 13] ) )
for i in range(len(__snake_case ) - 12 ) )
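# Worked example on a toy input (values are illustrative, not from the 1000-digit N):
# for the digit string "123456" with a window of 3, the candidate products are
# 1*2*3 = 6, 2*3*4 = 24, 3*4*5 = 60 and 4*5*6 = 120, so the maximum would be 120.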
if __name__ == "__main__":
print(F"""{solution() = }""") | 269 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
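# Why logarithms: evaluating a**b outright would produce astronomically large integers,
# but log10 is monotonic, so a**b > c**d exactly when b*log10(a) > d*log10(c).
# Small check: for 2**10 vs 3**6, 10*log10(2) ~= 3.01 > 6*log10(3) ~= 2.86,
# matching 1024 > 729.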
if __name__ == "__main__":
print(solution()) | 269 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case : Union[str, Any] = 0
__snake_case : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case : Union[str, Any] = tuple[int, int]
class A__ :
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Node | None , ) -> None:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = pos_x
__lowerCAmelCase : List[str] = pos_y
__lowerCAmelCase : int = (pos_y, pos_x)
__lowerCAmelCase : int = goal_x
__lowerCAmelCase : Dict = goal_y
__lowerCAmelCase : List[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Any = self.calculate_heuristic()
__lowerCAmelCase : str = self.g_cost + self.h_cost
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> float:
"""simple docstring"""
__lowerCAmelCase : int = self.pos_x - self.goal_x
__lowerCAmelCase : int = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_SCREAMING_SNAKE_CASE) + abs(_SCREAMING_SNAKE_CASE)
else:
return sqrt(dy**2 + dx**2)
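# Example: from (pos_y, pos_x) = (3, 1) to a goal at (goal_y, goal_x) = (7, 4) the
# Manhattan heuristic gives |1 - 4| + |3 - 7| = 7, while the Euclidean heuristic gives
# sqrt(3**2 + 4**2) = 5.0; the module-level HEURISTIC flag picks between the two.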
def __lt__( self: Dict , _SCREAMING_SNAKE_CASE: Node) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A__ :
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: TPosition , _SCREAMING_SNAKE_CASE: TPosition) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : Optional[int] = False
def _SCREAMING_SNAKE_CASE ( self: str) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : List[str] = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(_SCREAMING_SNAKE_CASE)
self.closed_nodes.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.get_successors(_SCREAMING_SNAKE_CASE)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE)
else:
# retrieve the best current path
__lowerCAmelCase : List[str] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE)
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE)
return [self.start.pos]
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Node) -> list[Node]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = []
for action in delta:
__lowerCAmelCase : str = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ))
return successors
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Node | None) -> list[TPosition]:
"""simple docstring"""
__lowerCAmelCase : str = node
__lowerCAmelCase : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__lowerCAmelCase : Any = current_node.parent
path.reverse()
return path
class A__ :
'''simple docstring'''
def __init__( self: Dict , _SCREAMING_SNAKE_CASE: TPosition , _SCREAMING_SNAKE_CASE: TPosition) -> None:
"""simple docstring"""
__lowerCAmelCase : str = AStar(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = AStar(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = False
def _SCREAMING_SNAKE_CASE ( self: int) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowerCAmelCase : int = self.fwd_astar.open_nodes.pop(0)
__lowerCAmelCase : str = self.bwd_astar.open_nodes.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.fwd_astar.closed_nodes.append(_SCREAMING_SNAKE_CASE)
self.bwd_astar.closed_nodes.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = current_bwd_node
__lowerCAmelCase : List[str] = current_fwd_node
__lowerCAmelCase : Tuple = {
self.fwd_astar: self.fwd_astar.get_successors(_SCREAMING_SNAKE_CASE),
self.bwd_astar: self.bwd_astar.get_successors(_SCREAMING_SNAKE_CASE),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_SCREAMING_SNAKE_CASE)
else:
# retrieve the best current path
__lowerCAmelCase : Any = astar.open_nodes.pop(
astar.open_nodes.index(_SCREAMING_SNAKE_CASE))
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_SCREAMING_SNAKE_CASE)
else:
astar.open_nodes.append(_SCREAMING_SNAKE_CASE)
return [self.fwd_astar.start.pos]
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Node , _SCREAMING_SNAKE_CASE: Node) -> list[TPosition]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.fwd_astar.retrace_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = self.bwd_astar.retrace_path(_SCREAMING_SNAKE_CASE)
bwd_path.pop()
bwd_path.reverse()
__lowerCAmelCase : Union[str, Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case : Dict = (0, 0)
__snake_case : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case : List[Any] = time.time()
__snake_case : Any = AStar(init, goal)
__snake_case : List[Any] = a_star.search()
__snake_case : Optional[Any] = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
__snake_case : Dict = time.time()
__snake_case : Union[str, Any] = BidirectionalAStar(init, goal)
__snake_case : Optional[int] = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""") | 269 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A__ :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.abs((a - b)).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = after_output[0]
__lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
__lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase : Tuple = num_patches + 1
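# Numeric example (assuming a standard ViT geometry, not values from this test): a
# 224x224 image with 16x16 patches yields (224 // 16) ** 2 = 196 patches, so the
# attention sequence length checked below would be 196 + 1 = 197 including [CLS].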
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
pt_model.to(_SCREAMING_SNAKE_CASE)
pt_model.eval()
# prepare inputs
__lowerCAmelCase : Union[str, Any] = inputs_dict
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
pt_model_loaded.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
__lowerCAmelCase : str = config_inputs_dict.pop("text_config")
__lowerCAmelCase : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = after_outputs[0]
__lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = 13
__lowerCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4])
__lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = FlaxViTModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxViTModelTester(self)
__lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
__lowerCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Tuple = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = text_config_and_inputs
# gather the vision/text configs and the prepared text+image inputs for the dual-encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : str = random_attention_mask([batch_size, 4])
__lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : int = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxCLIPVisionModelTester(self)
__lowerCAmelCase : str = FlaxBertModelTester(self)
__lowerCAmelCase : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Any = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = text_config_and_inputs
# gather the vision/text configs and the prepared text+image inputs for the dual-encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
__lowerCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
__lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__lowerCAmelCase : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase : List[str] = np.array([[1.228_4727, 0.310_4122]])
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3)) | 269 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class A__ ( ctypes.Structure ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def _lowercase ( ) -> List[Any]:
if os.name == "nt":
__lowerCAmelCase : Dict = CursorInfo()
__lowerCAmelCase : Tuple = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__snake_case ,ctypes.byref(__snake_case ) )
__lowerCAmelCase : Any = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__snake_case ,ctypes.byref(__snake_case ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def _lowercase ( ) -> Optional[Any]:
if os.name == "nt":
__lowerCAmelCase : int = CursorInfo()
__lowerCAmelCase : Dict = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__snake_case ,ctypes.byref(__snake_case ) )
__lowerCAmelCase : Union[str, Any] = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__snake_case ,ctypes.byref(__snake_case ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
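# "\033[?25l" and "\033[?25h" are the standard ANSI/DEC private-mode 25 sequences for
# hiding and showing the terminal cursor on POSIX terminals; on Windows the same effect
# is achieved above by toggling the console cursor's `visible` flag through ctypes.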
@contextmanager
def _lowercase ( ) -> int:
try:
hide_cursor()
yield
finally:
show_cursor() | 269 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list:
__lowerCAmelCase : Dict = []
__lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__lowerCAmelCase : int = result + left + right
return input_list
def _lowercase ( __snake_case ) -> list:
if len(__snake_case ) <= 1:
return input_list
__lowerCAmelCase : int = list(__snake_case )
# iteration for two-way merging
__lowerCAmelCase : Optional[int] = 2
while p <= len(__snake_case ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 ,len(__snake_case ) ,__snake_case ):
__lowerCAmelCase : Union[str, Any] = i
__lowerCAmelCase : Tuple = i + p - 1
__lowerCAmelCase : Optional[Any] = (low + high + 1) // 2
__lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case )
# final merge of last two parts
if p * 2 >= len(__snake_case ):
__lowerCAmelCase : Optional[Any] = i
__lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 )
break
p *= 2
return input_list
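# Illustrative trace for an 8-element input: with p = 2 each adjacent pair is put in
# order, with p = 4 the sorted pairs are merged into runs of four, and the
# `p * 2 >= len(...)` branch then merges the two remaining halves into the final result.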
if __name__ == "__main__":
__snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__snake_case : Optional[int] = []
else:
__snake_case : int = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted)) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowercase ( ) -> None:
assert or_gate(0 ,0 ) == 0
assert or_gate(0 ,1 ) == 1
assert or_gate(1 ,0 ) == 1
assert or_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 269 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small")
__lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="np").input_ids
__lowerCAmelCase : Dict = tokenizer("Hi I am" , return_tensors="np").input_ids
__lowerCAmelCase : str = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id)
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE).logits
__lowerCAmelCase : int = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1])).mean()
__lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item())
__lowerCAmelCase : str = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
__lowerCAmelCase : int = _modexpt(__snake_case ,exponent // 2 ,__snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__snake_case ,exponent - 1 ,__snake_case )) % modulo_value
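# _modexpt is square-and-multiply exponentiation kept modular at every step; e.g. for
# 3**13 % 100 it effectively combines 3**8, 3**4 and 3**1 modulo 100, giving 23, the
# same value as Python's built-in pow(3, 13, 100).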
def _lowercase ( __snake_case = 1_777 ,__snake_case = 1_855 ,__snake_case = 8 ) -> int:
__lowerCAmelCase : Dict = base
for _ in range(1 ,__snake_case ):
__lowerCAmelCase : Optional[int] = _modexpt(__snake_case ,__snake_case ,10**digits )
return result
if __name__ == "__main__":
print(F"""{solution() = }""") | 269 |
"""simple docstring"""
import re
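# Return the complementary DNA strand (A<->T, C<->G); raises ValueError if the input contains characters other than A, T, C, G.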
def _lowercase ( __snake_case ) -> str:
if len(re.findall("[ATCG]" ,__snake_case ) ) != len(__snake_case ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" ,"TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 1 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'xlnet'
SCREAMING_SNAKE_CASE = ['mems']
SCREAMING_SNAKE_CASE = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict=3_2000 , _SCREAMING_SNAKE_CASE: Any=1024 , _SCREAMING_SNAKE_CASE: str=24 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: List[str]=4096 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: int="bi" , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=1e-12 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE: Optional[Any]=512 , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: int=False , _SCREAMING_SNAKE_CASE: str=-1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: Dict="last" , _SCREAMING_SNAKE_CASE: str=True , _SCREAMING_SNAKE_CASE: Dict="tanh" , _SCREAMING_SNAKE_CASE: Tuple=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=5 , _SCREAMING_SNAKE_CASE: Tuple=5 , _SCREAMING_SNAKE_CASE: str=1 , _SCREAMING_SNAKE_CASE: Tuple=2 , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Any = d_model
__lowerCAmelCase : Tuple = n_layer
__lowerCAmelCase : Tuple = n_head
if d_model % n_head != 0:
raise ValueError(F"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""")
__lowerCAmelCase : int = d_model // n_head
__lowerCAmelCase : Any = ff_activation
__lowerCAmelCase : str = d_inner
__lowerCAmelCase : List[str] = untie_r
__lowerCAmelCase : Tuple = attn_type
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Any = layer_norm_eps
__lowerCAmelCase : str = dropout
__lowerCAmelCase : List[str] = mem_len
__lowerCAmelCase : Dict = reuse_len
__lowerCAmelCase : int = bi_data
__lowerCAmelCase : Optional[int] = clamp_len
__lowerCAmelCase : List[Any] = same_length
__lowerCAmelCase : Dict = summary_type
__lowerCAmelCase : int = summary_use_proj
__lowerCAmelCase : Dict = summary_activation
__lowerCAmelCase : str = summary_last_dropout
__lowerCAmelCase : Union[str, Any] = start_n_top
__lowerCAmelCase : List[Any] = end_n_top
__lowerCAmelCase : Union[str, Any] = bos_token_id
__lowerCAmelCase : Tuple = pad_token_id
__lowerCAmelCase : Union[str, Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = kwargs["use_cache"]
__lowerCAmelCase : Tuple = use_mems_eval
__lowerCAmelCase : Dict = use_mems_train
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
return -1
@max_position_embeddings.setter
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any]) -> Any:
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""") | 269 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FunnelTokenizer
SCREAMING_SNAKE_CASE = FunnelTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : str = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
__lowerCAmelCase : str = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file)
__lowerCAmelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE) , [7, 4, 5, 10, 8, 9])
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
__lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running")
__lowerCAmelCase : Optional[int] = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
__lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len) | 269 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__snake_case : int = False
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
return 12
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]:
"""simple docstring"""
return 12
@property
def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
"""simple docstring"""
return 32
@property
def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
__lowerCAmelCase : Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
__lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE)
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
__lowerCAmelCase : Optional[Any] = 12
__lowerCAmelCase : Dict = 12
__lowerCAmelCase : Any = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
__lowerCAmelCase : str = TransformeraDModel(**_SCREAMING_SNAKE_CASE)
return model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = "cpu"
__lowerCAmelCase : Tuple = self.dummy_vqvae
__lowerCAmelCase : List[Any] = self.dummy_text_encoder
__lowerCAmelCase : Any = self.dummy_tokenizer
__lowerCAmelCase : Optional[int] = self.dummy_transformer
__lowerCAmelCase : List[str] = VQDiffusionScheduler(self.num_embed)
__lowerCAmelCase : Tuple = LearnedClassifierFreeSamplingEmbeddings(learnable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VQDiffusionPipeline(
vqvae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , transformer=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = "teddy bear playing in the pool"
__lowerCAmelCase : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(0)
__lowerCAmelCase : Dict = pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="np")
__lowerCAmelCase : List[str] = output.images
__lowerCAmelCase : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(0)
__lowerCAmelCase : Tuple = pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , num_inference_steps=2)[0]
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCAmelCase : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Tuple = "cpu"
__lowerCAmelCase : List[Any] = self.dummy_vqvae
__lowerCAmelCase : List[Any] = self.dummy_text_encoder
__lowerCAmelCase : Any = self.dummy_tokenizer
__lowerCAmelCase : Optional[int] = self.dummy_transformer
__lowerCAmelCase : Union[str, Any] = VQDiffusionScheduler(self.num_embed)
__lowerCAmelCase : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_SCREAMING_SNAKE_CASE , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
__lowerCAmelCase : Optional[int] = VQDiffusionPipeline(
vqvae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , transformer=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = "teddy bear playing in the pool"
__lowerCAmelCase : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(0)
__lowerCAmelCase : Any = pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="np")
__lowerCAmelCase : Union[str, Any] = output.images
__lowerCAmelCase : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(0)
__lowerCAmelCase : Optional[Any] = pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , num_inference_steps=2)[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowerCAmelCase : str = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: str) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy")
__lowerCAmelCase : Union[str, Any] = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
__lowerCAmelCase : Optional[int] = pipeline.to(_SCREAMING_SNAKE_CASE)
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(0)
__lowerCAmelCase : Any = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , )
__lowerCAmelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0 | 269 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
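# Scrape Amazon India search results for a product and collect title, link, current price, rating, MRP and discount percentage into a pandas DataFrame.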
def _lowercase ( __snake_case = "laptop" ) -> DataFrame:
__lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}"""
__lowerCAmelCase : Union[str, Any] = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text )
# Initialize a Pandas dataframe with the column titles
__lowerCAmelCase : Dict = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,):
try:
__lowerCAmelCase : Any = item.ha.text
__lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"]
__lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text
try:
__lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text
except AttributeError:
__lowerCAmelCase : Optional[Any] = "Not available"
try:
__lowerCAmelCase : Union[str, Any] = (
"₹"
+ item.find(
"span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
__lowerCAmelCase : Dict = ""
try:
__lowerCAmelCase : str = float(
(
(
float(product_mrp.strip("₹" ).replace("," ,"" ) )
- float(product_price.strip("₹" ).replace("," ,"" ) )
)
/ float(product_mrp.strip("₹" ).replace("," ,"" ) )
)
* 100 )
except ValueError:
__lowerCAmelCase : List[str] = float("nan" )
except AttributeError:
pass
__lowerCAmelCase : int = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase : Union[str, Any] = " "
__lowerCAmelCase : Union[str, Any] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__snake_case : Any = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""") | 269 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__snake_case : List[str] = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__snake_case : Tuple = concatenate_datasets
__snake_case : Any = DownloadConfig
__snake_case : Tuple = DownloadManager
__snake_case : str = DownloadMode
__snake_case : List[Any] = DownloadConfig
__snake_case : Optional[Any] = DownloadMode
__snake_case : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 269 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
__lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE) | 269 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__snake_case : Tuple = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
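# Agent tool wrapping the NLLB-200 (distilled 600M) translation model: plain-English language names are mapped to NLLB codes before encoding, generating and decoding.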
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
SCREAMING_SNAKE_CASE = 'translator'
SCREAMING_SNAKE_CASE = AutoTokenizer
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM
SCREAMING_SNAKE_CASE = LANGUAGE_CODES
SCREAMING_SNAKE_CASE = ['text', 'text', 'text']
SCREAMING_SNAKE_CASE = ['text']
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""")
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""")
__lowerCAmelCase : Optional[Any] = self.lang_to_code[src_lang]
__lowerCAmelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_SCREAMING_SNAKE_CASE , return_tensors="pt" , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
return self.model.generate(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]) -> List[Any]:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_SCREAMING_SNAKE_CASE) | 269 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__snake_case : Optional[int] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : str = logging.get_logger(__name__)
__snake_case : Dict = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'informer'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: str = "student_t" , _SCREAMING_SNAKE_CASE: str = "nll" , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: List[int] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, bool]] = "mean" , _SCREAMING_SNAKE_CASE: int = 0 , _SCREAMING_SNAKE_CASE: int = 0 , _SCREAMING_SNAKE_CASE: int = 0 , _SCREAMING_SNAKE_CASE: int = 0 , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None , _SCREAMING_SNAKE_CASE: int = 64 , _SCREAMING_SNAKE_CASE: int = 32 , _SCREAMING_SNAKE_CASE: int = 32 , _SCREAMING_SNAKE_CASE: int = 2 , _SCREAMING_SNAKE_CASE: int = 2 , _SCREAMING_SNAKE_CASE: int = 2 , _SCREAMING_SNAKE_CASE: int = 2 , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: str = "gelu" , _SCREAMING_SNAKE_CASE: float = 0.05 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: int = 100 , _SCREAMING_SNAKE_CASE: float = 0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str = "prob" , _SCREAMING_SNAKE_CASE: int = 5 , _SCREAMING_SNAKE_CASE: bool = True , **_SCREAMING_SNAKE_CASE: str , ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = prediction_length
__lowerCAmelCase : Optional[Any] = context_length or prediction_length
__lowerCAmelCase : Optional[int] = distribution_output
__lowerCAmelCase : Any = loss
__lowerCAmelCase : str = input_size
__lowerCAmelCase : Optional[Any] = num_time_features
__lowerCAmelCase : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__lowerCAmelCase : Optional[int] = scaling
__lowerCAmelCase : List[Any] = num_dynamic_real_features
__lowerCAmelCase : Optional[Any] = num_static_real_features
__lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`")
__lowerCAmelCase : Optional[int] = cardinality
else:
__lowerCAmelCase : str = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`")
__lowerCAmelCase : Tuple = embedding_dimension
else:
__lowerCAmelCase : Optional[int] = [min(50 , (cat + 1) // 2) for cat in self.cardinality]
__lowerCAmelCase : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
__lowerCAmelCase : Dict = input_size * len(self.lags_sequence) + self._number_of_features
__lowerCAmelCase : Union[str, Any] = d_model
__lowerCAmelCase : Any = encoder_attention_heads
__lowerCAmelCase : Any = decoder_attention_heads
__lowerCAmelCase : Optional[int] = encoder_ffn_dim
__lowerCAmelCase : Any = decoder_ffn_dim
__lowerCAmelCase : List[Any] = encoder_layers
__lowerCAmelCase : Tuple = decoder_layers
__lowerCAmelCase : List[str] = dropout
__lowerCAmelCase : Optional[int] = attention_dropout
__lowerCAmelCase : List[Any] = activation_dropout
__lowerCAmelCase : List[Any] = encoder_layerdrop
__lowerCAmelCase : Union[str, Any] = decoder_layerdrop
__lowerCAmelCase : Any = activation_function
__lowerCAmelCase : Any = init_std
__lowerCAmelCase : Union[str, Any] = use_cache
# Informer
__lowerCAmelCase : int = attention_type
__lowerCAmelCase : Dict = sampling_factor
__lowerCAmelCase : List[str] = distil
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 269 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'maskformer-swin'
SCREAMING_SNAKE_CASE = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: int=224 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: int=3 , _SCREAMING_SNAKE_CASE: List[Any]=96 , _SCREAMING_SNAKE_CASE: Union[str, Any]=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE: Any=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE: List[str]=7 , _SCREAMING_SNAKE_CASE: List[str]=4.0 , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: str="gelu" , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE: str=1e-5 , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: str=None , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = image_size
__lowerCAmelCase : Any = patch_size
__lowerCAmelCase : Tuple = num_channels
__lowerCAmelCase : Any = embed_dim
__lowerCAmelCase : Any = depths
__lowerCAmelCase : Dict = len(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = num_heads
__lowerCAmelCase : Tuple = window_size
__lowerCAmelCase : Dict = mlp_ratio
__lowerCAmelCase : Any = qkv_bias
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : int = attention_probs_dropout_prob
__lowerCAmelCase : Tuple = drop_path_rate
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Optional[int] = use_absolute_embeddings
__lowerCAmelCase : List[str] = layer_norm_eps
__lowerCAmelCase : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCAmelCase : Optional[Any] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE) - 1))
__lowerCAmelCase : Any = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(_SCREAMING_SNAKE_CASE) + 1)]
__lowerCAmelCase , __lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ) -> bool:
__lowerCAmelCase : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCAmelCase : set[int] = set()
return any(
node not in visited and depth_first_search(__snake_case ,__snake_case ,__snake_case ,__snake_case )
for node in graph )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> bool:
visited.add(__snake_case )
rec_stk.add(__snake_case )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__snake_case ,__snake_case ,__snake_case ,__snake_case ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__snake_case )
return False
if __name__ == "__main__":
from doctest import testmod
testmod() | 269 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
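# Lightweight profiling helpers: a background thread tracks peak CPU RSS, and the functions below snapshot time, CPU and per-GPU memory before/after a block and print the deltas in MiB.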
class A__ :
'''simple docstring'''
def __init__( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = psutil.Process()
__lowerCAmelCase : str = False
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = -1
while True:
__lowerCAmelCase : str = max(self.process.memory_info().rss , self.cpu_memory_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : str = threading.Thread(target=self.peak_monitor)
__lowerCAmelCase : Tuple = True
self.thread.start()
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = False
self.thread.join()
return self.cpu_memory_peak
__snake_case : Tuple = PeakCPUMemory()
def _lowercase ( ) -> str:
# Time
__lowerCAmelCase : str = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : Optional[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = torch.cuda.memory_allocated(__snake_case )
torch.cuda.reset_peak_memory_stats()
return measures
def _lowercase ( __snake_case ) -> Optional[Any]:
# Time
__lowerCAmelCase : str = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : str = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCAmelCase : List[str] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = (torch.cuda.memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
__lowerCAmelCase : Any = (torch.cuda.max_memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
if digit_amount > 0:
return round(number - int(__snake_case ) ,__snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 269 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'mctct'
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str=8065 , _SCREAMING_SNAKE_CASE: str=1536 , _SCREAMING_SNAKE_CASE: str=36 , _SCREAMING_SNAKE_CASE: Optional[Any]=6144 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=384 , _SCREAMING_SNAKE_CASE: Optional[Any]=920 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-5 , _SCREAMING_SNAKE_CASE: List[Any]=0.3 , _SCREAMING_SNAKE_CASE: Optional[Any]="relu" , _SCREAMING_SNAKE_CASE: Optional[int]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.3 , _SCREAMING_SNAKE_CASE: Dict=0.3 , _SCREAMING_SNAKE_CASE: List[Any]=1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Tuple=0.3 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: int=(7,) , _SCREAMING_SNAKE_CASE: str=(3,) , _SCREAMING_SNAKE_CASE: Union[str, Any]=80 , _SCREAMING_SNAKE_CASE: Tuple=1 , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Tuple="sum" , _SCREAMING_SNAKE_CASE: List[str]=False , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : str = hidden_size
__lowerCAmelCase : str = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Dict = attention_head_dim
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : str = layer_norm_eps
__lowerCAmelCase : Tuple = layerdrop
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : List[Any] = initializer_range
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : str = pad_token_id
__lowerCAmelCase : Optional[int] = bos_token_id
__lowerCAmelCase : Union[str, Any] = eos_token_id
__lowerCAmelCase : Any = conv_glu_dim
__lowerCAmelCase : Optional[int] = conv_dropout
__lowerCAmelCase : Union[str, Any] = num_conv_layers
__lowerCAmelCase : Optional[int] = input_feat_per_channel
__lowerCAmelCase : Union[str, Any] = input_channels
__lowerCAmelCase : Optional[Any] = conv_channels
__lowerCAmelCase : Dict = ctc_loss_reduction
__lowerCAmelCase : int = ctc_zero_infinity
# prevents config testing fail with exporting to json
__lowerCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""") | 269 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
SCREAMING_SNAKE_CASE = Features({'text': Value('string' )} )
SCREAMING_SNAKE_CASE = Features({} )
SCREAMING_SNAKE_CASE = "text"
@property
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"} | 269 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
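# Banker's algorithm demo: claim vector, per-process allocated resources and per-process maximum claims, used by the class below to decide whether the system is in a safe state.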
__snake_case : Optional[Any] = [8, 5, 9, 7]
__snake_case : List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__snake_case : Optional[int] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class A__ :
'''simple docstring'''
def __init__( self: Any , _SCREAMING_SNAKE_CASE: list[int] , _SCREAMING_SNAKE_CASE: list[list[int]] , _SCREAMING_SNAKE_CASE: list[list[int]] , ) -> None:
"""simple docstring"""
__lowerCAmelCase : Any = claim_vector
__lowerCAmelCase : Tuple = allocated_resources_table
__lowerCAmelCase : Tuple = maximum_claim_table
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table)
for i in range(len(self.__allocated_resources_table[0]))
]
def _SCREAMING_SNAKE_CASE ( self: int) -> list[int]:
"""simple docstring"""
return np.array(self.__claim_vector) - np.array(
self.__processes_resource_summation())
def _SCREAMING_SNAKE_CASE ( self: int) -> list[list[int]]:
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i]) - np.array(_SCREAMING_SNAKE_CASE))
for i, allocated_resource in enumerate(self.__allocated_resources_table)
]
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> dict[int, list[int]]:
"""simple docstring"""
return {self.__need().index(_SCREAMING_SNAKE_CASE): i for i in self.__need()}
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> None:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.__need()
__lowerCAmelCase : int = self.__allocated_resources_table
__lowerCAmelCase : Dict = self.__available_resources()
__lowerCAmelCase : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n")
while need_list:
__lowerCAmelCase : int = False
for each_need in need_list:
__lowerCAmelCase : Dict = True
for index, need in enumerate(_SCREAMING_SNAKE_CASE):
if need > available_resources[index]:
__lowerCAmelCase : Dict = False
break
if execution:
__lowerCAmelCase : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowerCAmelCase : Union[str, Any] = original_need_index
print(F"""Process {process_number + 1} is executing.""")
# remove the process run from stack
need_list.remove(_SCREAMING_SNAKE_CASE)
# update available/freed resources stack
__lowerCAmelCase : Dict = np.array(_SCREAMING_SNAKE_CASE) + np.array(
alloc_resources_table[process_number])
print(
"Updated available resource stack for processes: "
+ " ".join([str(_SCREAMING_SNAKE_CASE) for x in available_resources]))
break
if safe:
print("The process is in a safe state.\n")
else:
print("System in unsafe state. Aborting...\n")
break
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
print(" " * 9 + "Allocated Resource Table")
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(_SCREAMING_SNAKE_CASE) + 1}"""
+ " ".join(F"""{it:>8}""" for it in item)
+ "\n")
print(" " * 9 + "System Resource Table")
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(_SCREAMING_SNAKE_CASE) + 1}"""
+ " ".join(F"""{it:>8}""" for it in item)
+ "\n")
print(
"Current Usage by Active Processes: "
+ " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__claim_vector))
print(
"Initial Available Resources: "
+ " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__available_resources()))
time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 1 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> list:
__lowerCAmelCase : Optional[int] = word.split()
def justify(__snake_case ,__snake_case ,__snake_case ) -> str:
__lowerCAmelCase : Tuple = max_width - width
__lowerCAmelCase : str = len(__snake_case )
if len(__snake_case ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__lowerCAmelCase : Dict = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__lowerCAmelCase : Dict = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__lowerCAmelCase : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__snake_case ):
num_spaces_between_words_list[i] += 1
__lowerCAmelCase : Union[str, Any] = []
for i in range(__snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__snake_case )
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : list[str] = []
__lowerCAmelCase : Tuple = 0
for word in words:
if width + len(__snake_case ) + len(__snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__snake_case )
width += len(__snake_case )
else:
# justify the line and add it to result
answer.append(justify(__snake_case ,__snake_case ,__snake_case ) )
# reset new line and new width
__lowerCAmelCase , __lowerCAmelCase : Any = [word], len(__snake_case )
__lowerCAmelCase : Dict = max_width - width - len(__snake_case )
answer.append(" ".join(__snake_case ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 269 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowercase ( *__snake_case ) -> Optional[Any]:
with open(__snake_case ,"r" ) as fh:
fcntl.flock(__snake_case ,fcntl.LOCK_EX )
try:
print(*__snake_case )
finally:
fcntl.flock(__snake_case ,fcntl.LOCK_UN )
__snake_case : List[Any] = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__snake_case : List[str] = torch.device('cuda', local_rank)
__snake_case : Optional[Any] = socket.gethostname()
__snake_case : str = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case : Tuple = dist.get_rank()
__snake_case : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise | 269 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
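# KL-regularized variational autoencoder: Encoder/Decoder pair with 1x1 quant and post-quant convolutions, attention-processor plumbing, and optional tiled decoding for large samples.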
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
@register_to_config
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: int = 3 , _SCREAMING_SNAKE_CASE: int = 3 , _SCREAMING_SNAKE_CASE: Tuple[str] = ("DownEncoderBlock2D",) , _SCREAMING_SNAKE_CASE: Tuple[str] = ("UpDecoderBlock2D",) , _SCREAMING_SNAKE_CASE: Tuple[int] = (64,) , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: str = "silu" , _SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 32 , _SCREAMING_SNAKE_CASE: int = 32 , _SCREAMING_SNAKE_CASE: float = 0.1_8215 , ) -> List[str]:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
__lowerCAmelCase : int = Encoder(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , down_block_types=_SCREAMING_SNAKE_CASE , block_out_channels=_SCREAMING_SNAKE_CASE , layers_per_block=_SCREAMING_SNAKE_CASE , act_fn=_SCREAMING_SNAKE_CASE , norm_num_groups=_SCREAMING_SNAKE_CASE , double_z=_SCREAMING_SNAKE_CASE , )
# pass init params to Decoder
__lowerCAmelCase : List[Any] = Decoder(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , up_block_types=_SCREAMING_SNAKE_CASE , block_out_channels=_SCREAMING_SNAKE_CASE , layers_per_block=_SCREAMING_SNAKE_CASE , norm_num_groups=_SCREAMING_SNAKE_CASE , act_fn=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1)
__lowerCAmelCase : str = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : int = False
__lowerCAmelCase : str = False
# only relevant if vae tiling is enabled
__lowerCAmelCase : List[Any] = self.config.sample_size
__lowerCAmelCase : Tuple = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
__lowerCAmelCase : Dict = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
__lowerCAmelCase : Tuple = 0.25
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Dict=False) -> Optional[Any]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (Encoder, Decoder)):
__lowerCAmelCase : Tuple = value
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: bool = True) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = use_tiling
def _SCREAMING_SNAKE_CASE ( self: Any) -> Union[str, Any]:
"""simple docstring"""
self.enable_tiling(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
__lowerCAmelCase : str = {}
def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: torch.nn.Module , _SCREAMING_SNAKE_CASE: Dict[str, AttentionProcessor]):
if hasattr(_SCREAMING_SNAKE_CASE , "set_processor"):
__lowerCAmelCase : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
return processors
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[AttentionProcessor, Dict[str, AttentionProcessor]]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = len(self.attn_processors.keys())
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) and len(_SCREAMING_SNAKE_CASE) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE)} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")
def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: torch.nn.Module , _SCREAMING_SNAKE_CASE: Optional[Any]):
if hasattr(_SCREAMING_SNAKE_CASE , "set_processor"):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
module.set_processor(_SCREAMING_SNAKE_CASE)
else:
module.set_processor(processor.pop(F"""{name}.processor"""))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
for name, module in self.named_children():
fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: torch.FloatTensor , _SCREAMING_SNAKE_CASE: bool = True) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE)
if self.use_slicing and x.shape[0] > 1:
__lowerCAmelCase : Union[str, Any] = [self.encoder(_SCREAMING_SNAKE_CASE) for x_slice in x.split(1)]
__lowerCAmelCase : str = torch.cat(_SCREAMING_SNAKE_CASE)
else:
__lowerCAmelCase : Tuple = self.encoder(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self.quant_conv(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: torch.FloatTensor , _SCREAMING_SNAKE_CASE: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.post_quant_conv(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = self.decoder(_SCREAMING_SNAKE_CASE)
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE)
@apply_forward_hook
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: torch.FloatTensor , _SCREAMING_SNAKE_CASE: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
__lowerCAmelCase : List[str] = [self._decode(_SCREAMING_SNAKE_CASE).sample for z_slice in z.split(1)]
__lowerCAmelCase : Tuple = torch.cat(_SCREAMING_SNAKE_CASE)
else:
__lowerCAmelCase : Tuple = self._decode(_SCREAMING_SNAKE_CASE).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = min(a.shape[2] , b.shape[2] , _SCREAMING_SNAKE_CASE)
for y in range(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = min(a.shape[3] , b.shape[3] , _SCREAMING_SNAKE_CASE)
for x in range(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: torch.FloatTensor , _SCREAMING_SNAKE_CASE: bool = True) -> AutoencoderKLOutput:
"""simple docstring"""
__lowerCAmelCase : List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
__lowerCAmelCase : Union[str, Any] = int(self.tile_latent_min_size * self.tile_overlap_factor)
__lowerCAmelCase : Any = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowerCAmelCase : Optional[int] = []
for i in range(0 , x.shape[2] , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = []
for j in range(0 , x.shape[3] , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowerCAmelCase : Dict = self.encoder(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self.quant_conv(_SCREAMING_SNAKE_CASE)
row.append(_SCREAMING_SNAKE_CASE)
rows.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCAmelCase : Any = self.blend_v(rows[i - 1][j] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if j > 0:
__lowerCAmelCase : Optional[Any] = self.blend_h(row[j - 1] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE , dim=3))
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=2)
__lowerCAmelCase : Optional[Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: torch.FloatTensor , _SCREAMING_SNAKE_CASE: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
__lowerCAmelCase : Optional[int] = int(self.tile_sample_min_size * self.tile_overlap_factor)
__lowerCAmelCase : Union[str, Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowerCAmelCase : List[str] = []
for i in range(0 , z.shape[2] , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = []
for j in range(0 , z.shape[3] , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowerCAmelCase : List[Any] = self.post_quant_conv(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self.decoder(_SCREAMING_SNAKE_CASE)
row.append(_SCREAMING_SNAKE_CASE)
rows.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Union[str, Any] = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCAmelCase : Optional[Any] = self.blend_v(rows[i - 1][j] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if j > 0:
__lowerCAmelCase : Any = self.blend_h(row[j - 1] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE , dim=3))
__lowerCAmelCase : str = torch.cat(_SCREAMING_SNAKE_CASE , dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: torch.FloatTensor , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__lowerCAmelCase : Any = sample
__lowerCAmelCase : Union[str, Any] = self.encode(_SCREAMING_SNAKE_CASE).latent_dist
if sample_posterior:
__lowerCAmelCase : Union[str, Any] = posterior.sample(generator=_SCREAMING_SNAKE_CASE)
else:
__lowerCAmelCase : Union[str, Any] = posterior.mode()
__lowerCAmelCase : List[str] = self.decode(_SCREAMING_SNAKE_CASE).sample
if not return_dict:
return (dec,)
        return DecoderOutput(sample=_SCREAMING_SNAKE_CASE)
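# A standalone sketch of the seam blending used by the tiled encode/decode paths
# above: rows of tile `b` near the seam are linearly cross-faded with the last rows
# of tile `a`. The function name `blend_v_sketch` is hypothetical and only meant to
# illustrate the technique.
def blend_v_sketch(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    b = b.clone()
    for y in range(blend_extent):
        # weight shifts from the bottom of `a` (y=0) to the top of `b` (y=blend_extent-1)
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b

# Two constant tiles blend into a smooth ramp across the seam:
#     blend_v_sketch(torch.zeros(1, 1, 8, 8), torch.ones(1, 1, 8, 8), 4)[0, 0, :4, 0]
#     -> tensor([0.0000, 0.2500, 0.5000, 0.7500])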
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__snake_case : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__snake_case : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__snake_case : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _lowercase ( __snake_case ,__snake_case ) -> Union[str, Any]:
return float((preds == labels).mean() )
def _lowercase ( __snake_case ,__snake_case ) -> str:
__lowerCAmelCase : str = simple_accuracy(__snake_case ,__snake_case )
__lowerCAmelCase : Any = float(fa_score(y_true=__snake_case ,y_pred=__snake_case ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : Union[str, Any] = np.array(__snake_case )
__lowerCAmelCase : Tuple = np.array(__snake_case )
__lowerCAmelCase : List[Any] = en_sentvecs.shape[0]
# mean centering
__lowerCAmelCase : Union[str, Any] = en_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : int = in_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : Optional[Any] = cdist(__snake_case ,__snake_case ,"cosine" )
__lowerCAmelCase : int = np.array(range(__snake_case ) )
__lowerCAmelCase : int = sim.argsort(axis=1 )[:, :10]
__lowerCAmelCase : Optional[Any] = np.any(preds == actual[:, None] ,axis=1 )
return float(matches.mean() )
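# A toy check of the retrieval logic above; the helper name and values are
# illustrative only, not part of the original metric. With identical source and
# target sentence vectors every query retrieves itself among its 10 nearest
# neighbours, so the precision@10 value must come out as 1.0.
def _precision_at_10_toy_check() -> float:
    vecs = np.random.RandomState(0).randn(5, 16)
    sim = cdist(vecs, vecs, "cosine")
    preds = sim.argsort(axis=1)[:, :10]
    actual = np.arange(vecs.shape[0])
    return float(np.any(preds == actual[:, None], axis=1).mean())  # -> 1.0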
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
"""simple docstring"""
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]") | 269 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any=13 , _SCREAMING_SNAKE_CASE: Dict=7 , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=99 , _SCREAMING_SNAKE_CASE: str=32 , _SCREAMING_SNAKE_CASE: Dict=5 , _SCREAMING_SNAKE_CASE: str=4 , _SCREAMING_SNAKE_CASE: Tuple=37 , _SCREAMING_SNAKE_CASE: Dict="gelu" , _SCREAMING_SNAKE_CASE: List[str]=0.1 , _SCREAMING_SNAKE_CASE: List[Any]=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Optional[int]=16 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=0.02 , _SCREAMING_SNAKE_CASE: List[Any]=4 , ) -> str:
"""simple docstring"""
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Optional[int] = batch_size
__lowerCAmelCase : Optional[Any] = seq_length
__lowerCAmelCase : List[Any] = is_training
__lowerCAmelCase : int = use_attention_mask
__lowerCAmelCase : Optional[int] = use_token_type_ids
__lowerCAmelCase : str = use_labels
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Union[str, Any] = hidden_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : Optional[Any] = num_attention_heads
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : Tuple = hidden_act
__lowerCAmelCase : List[Any] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : int = max_position_embeddings
__lowerCAmelCase : List[Any] = type_vocab_size
__lowerCAmelCase : Tuple = type_sequence_label_size
__lowerCAmelCase : str = initializer_range
__lowerCAmelCase : List[Any] = num_choices
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Optional[int] = None
if self.use_attention_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase : int = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_SCREAMING_SNAKE_CASE , )
return config, input_ids, attention_mask
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = config_and_inputs
__lowerCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = FlaxDistilBertModelTester(self)
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Dict = model_class_name.from_pretrained("distilbert-base-uncased")
__lowerCAmelCase : Any = model(np.ones((1, 1)))
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
__lowerCAmelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
__lowerCAmelCase : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : Union[str, Any] = (1, 11, 768)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1e-4))
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
if digit_amount > 0:
return round(number - int(__snake_case ) ,__snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( __snake_case ) -> int:
__lowerCAmelCase : Any = prime_factors(__snake_case )
if is_square_free(__snake_case ):
return -1 if len(__snake_case ) % 2 else 1
return 0
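# A self-contained sketch of the same Mobius computation that does not rely on the
# local maths.prime_factors / maths.is_square_free helpers; the name
# `mobius_sketch` is hypothetical and added only for illustration.
def mobius_sketch(n: int) -> int:
    distinct_primes = 0
    d, m = 2, n
    while d * d <= m:
        if m % d == 0:
            count = 0
            while m % d == 0:
                m //= d
                count += 1
            if count > 1:
                return 0  # a squared prime factor means n is not square-free
            distinct_primes += 1
        d += 1
    if m > 1:
        distinct_primes += 1
    return -1 if distinct_primes % 2 else 1

# mobius_sketch over 1..10 -> [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]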
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'torchsde']
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"]) | 269 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Tuple = logging.get_logger(__name__)
def _lowercase ( __snake_case ) -> List[Any]:
__lowerCAmelCase : Tuple = torch.load(__snake_case ,map_location="cpu" )
if "model" in sd.keys():
__lowerCAmelCase : Any = torch.load(__snake_case ,map_location="cpu" )["model"]
# pop unnecessary weights
__lowerCAmelCase : List[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__snake_case )
__lowerCAmelCase : Dict = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowerCAmelCase : Union[str, Any] = sd.pop(__snake_case )
__lowerCAmelCase : Tuple = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowerCAmelCase : str = sd[key]
# We split QKV in separate Q,K,V
__lowerCAmelCase : Any = key.replace(".qkv_proj." ,".q_proj." )
__lowerCAmelCase : List[str] = key.replace(".qkv_proj." ,".k_proj." )
__lowerCAmelCase : Optional[Any] = key.replace(".qkv_proj." ,".v_proj." )
__lowerCAmelCase : Optional[Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = torch.split(__snake_case ,depth // 3 ,dim=0 )
__lowerCAmelCase : Optional[Any] = q
__lowerCAmelCase : Any = k
__lowerCAmelCase : Optional[int] = v
del sd[key]
return sd
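# A toy illustration of the fused-QKV split above: a (3*d, d) projection weight is
# cut into three equal (d, d) chunks along dim 0, one each for Q, K and V. The
# helper name and sizes are illustrative only.
def _qkv_split_demo(d: int = 4):
    fused = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
    q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (d, d)
    return q, k, v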
@torch.no_grad()
def _lowercase ( __snake_case ,__snake_case ,__snake_case=None ) -> int:
__lowerCAmelCase : Optional[Any] = load_checkpoint(__snake_case )
if config is not None:
__lowerCAmelCase : str = OPTConfig.from_pretrained(__snake_case )
else:
__lowerCAmelCase : str = OPTConfig()
__lowerCAmelCase : int = OPTModel(__snake_case ).half().eval()
model.load_state_dict(__snake_case )
# Check results
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
__snake_case : Dict = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
"""simple docstring"""
def _lowercase ( ) -> int:
return 1
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__snake_case )
def _lowercase ( __snake_case = 200 ) -> int:
return two_pound(__snake_case )
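# A compact, self-contained equivalent of the recursion above, which counts the
# ways to make a target number of pence from UK coin denominations: a standard
# bottom-up dynamic programme over coins. The name `coin_combinations` is a
# hypothetical addition for illustration.
def coin_combinations(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [0] * (target + 1)
    ways[0] = 1
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

# coin_combinations(200) -> 73682, the answer to Project Euler problem 31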
if __name__ == "__main__":
    print(solution(int(input().strip())))
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Dict , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Dict) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Any , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[Any] , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: int) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: str , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: int) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: int , **_SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: List[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: str , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: str) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Tuple , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: str , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Dict , *_SCREAMING_SNAKE_CASE: Any , **_SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[str] , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Any , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'transformers', 'onnx']
def __init__( self: Tuple , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: str , *_SCREAMING_SNAKE_CASE: int , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: List[str] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch", "transformers", "onnx"]) | 269 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as input_file:
__lowerCAmelCase : List[str] = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
__lowerCAmelCase : List[Any] = input_file.read()
__lowerCAmelCase : Any = regexp.search(_SCREAMING_SNAKE_CASE)
return match
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str) -> Optional[Any]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8") as input_file:
__lowerCAmelCase : Any = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
__lowerCAmelCase : Optional[int] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__lowerCAmelCase : int = regexp.finditer(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = Path("./datasets")
__lowerCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_SCREAMING_SNAKE_CASE)):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Dict = Path("./datasets")
__lowerCAmelCase : Union[str, Any] = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(_SCREAMING_SNAKE_CASE)):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""") | 269 | 1 |