"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : list ) ->float:
'''simple docstring'''
_validate_point(_lowercase )
_validate_point(_lowercase )
if len(_lowercase ) != len(_lowercase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowercase , _lowercase ) ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : list[float] ) ->None:
'''simple docstring'''
if point:
if isinstance(_lowercase , _lowercase ):
for item in point:
if not isinstance(_lowercase , (int, float) ):
a : Tuple = (
"Expected a list of numbers as input, found "
F"""{type(_lowercase ).__name__}"""
)
raise TypeError(_lowercase )
else:
a : Any = F"""Expected a list of numbers as input, found {type(_lowercase ).__name__}"""
raise TypeError(_lowercase )
else:
raise ValueError("Missing an input" )
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : list ) ->float:
'''simple docstring'''
_validate_point(_lowercase )
_validate_point(_lowercase )
if len(_lowercase ) != len(_lowercase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowercase , _lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
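# Doctest-style usage sketch (illustrative, not part of the original file):
#
#     >>> manhattan_distance([1, 1], [2, 2])
#     2.0
#     >>> manhattan_distance([1.5, 1.5], [6.5, 98.5])
#     102.0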
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Tuple:
a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Optional[Any]:
a, a, a, a : List[str] = hidden_states.shape
a : List[Any] = jax.image.resize(
lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
a : List[str] = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Dict:
a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
a : Tuple = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : int =None
lowerCamelCase : float =0.0
lowerCamelCase : bool =None
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> int:
a : Dict = self.in_channels if self.out_channels is None else self.out_channels
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : Optional[int] = nn.Dropout(self.dropout_prob )
a : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a : List[str] = None
if use_nin_shortcut:
a : Optional[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str:
a : int = hidden_states
a : Tuple = self.norma(lowerCAmelCase__ )
a : Any = nn.swish(lowerCAmelCase__ )
a : int = self.conva(lowerCAmelCase__ )
a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
a : Dict = hidden_states + temb
a : str = self.norma(lowerCAmelCase__ )
a : List[Any] = nn.swish(lowerCAmelCase__ )
a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
a : Tuple = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
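# Usage sketch (illustrative; the class names above are restored from the
# obfuscated dump, so treat them as assumptions). Flax modules are initialised
# with example inputs and then applied functionally:
#
#     x = jnp.zeros((1, 16, 16, 8))  # NHWC layout, as the resize call assumes
#     upsample = FlaxUpsample2D(out_channels=8)
#     params = upsample.init(jax.random.PRNGKey(0), x)
#     y = upsample.apply(params, x)  # -> shape (1, 32, 32, 8)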
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
a : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
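# CLI sketch (illustrative): this module backs the `accelerate test` subcommand,
# e.g. `accelerate test --config_file path/to/default_config.yaml`, which launches
# a short end-to-end check of the distributed setup.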
"""simple docstring"""
a : Tuple = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
"""simple docstring"""
a : str = 8.314_4598
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
a : Any = 300
a : Dict = 28
a : Dict = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
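# Worked check (illustrative, not part of the original file). The function computes
# v_rms = sqrt(3 * R * T / M); note that M must be in kg/mol for a result in m/s,
# so nitrogen's 28 g/mol would be passed as 0.028:
#
#     >>> round(rms_speed_of_molecule(300, 0.028), 1)
#     517.0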
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
a : Tuple = input("Enter message: " )
a : List[Any] = int(input(F"""Enter key [2-{len(_lowercase ) - 1}]: """ ) )
a : Dict = input("Encryption/Decryption [e/d]: " )
if mode.lower().startswith("e" ):
a : Dict = encrypt_message(_lowercase , _lowercase )
elif mode.lower().startswith("d" ):
a : Optional[Any] = decrypt_message(_lowercase , _lowercase )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + '|'}""" )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : str ) ->str:
'''simple docstring'''
a : str = [""] * key
for col in range(_lowercase ):
a : Optional[Any] = col
while pointer < len(_lowercase ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : str ) ->str:
'''simple docstring'''
a : int = math.ceil(len(_lowercase ) / key )
a : Union[str, Any] = key
a : List[Any] = (num_cols * num_rows) - len(_lowercase )
a : List[str] = [""] * num_cols
a : int = 0
a : Any = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
a : List[Any] = 0
row += 1
return "".join(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
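# Round-trip sketch (illustrative, not part of the original file): decrypting
# with the same key recovers the plaintext.
#
#     >>> encrypt_message(6, "Harshil Darji")
#     'Hlia rDsahrij'
#     >>> decrypt_message(6, "Hlia rDsahrij")
#     'Harshil Darji'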
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : str = 3
a : str = 250
a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def __a ( self ) -> List[Any]:
a, a : str = self._get_tensors(5 )
a : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : str = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = MaxLengthCriteria(max_length=10 )
a, a : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
a, a : str = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __a ( self ) -> str:
a, a : Tuple = self._get_tensors(5 )
a : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
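# Usage sketch (illustrative, not part of the test file): outside the tests, the
# same objects plug into generation via the `stopping_criteria` argument:
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     output_ids = model.generate(input_ids, stopping_criteria=criteria)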
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200 ) ->int:
'''simple docstring'''
a : Dict = [1, 2, 5, 10, 20, 50, 100, 200]
a : Optional[Any] = [0] * (pence + 1)
a : List[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_lowercase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
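# Small worked example (illustrative, not part of the original file): for 5 pence
# only the coins {1, 2, 5} contribute, giving four ways
# (5, 2+2+1, 2+1+1+1, 1+1+1+1+1):
#
#     >>> solution(5)
#     4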
"""simple docstring"""
import cva
import numpy as np
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
if k in (0.04, 0.06):
a : int = k
a : Dict = window_size
else:
raise ValueError("invalid k value" )
def __str__( self ) -> str:
return str(self.k )
def __a ( self , lowerCAmelCase__ ) -> tuple[cva.Mat, list[list[int]]]:
a : Optional[int] = cva.imread(lowerCAmelCase__ , 0 )
a : List[Any] = img.shape
a : list[list[int]] = []
a : List[Any] = img.copy()
a : Union[str, Any] = cva.cvtColor(lowerCAmelCase__ , cva.COLOR_GRAY2RGB )
a : Optional[int] = np.gradient(lowerCAmelCase__ )
a : int = dx**2
a : Any = dy**2
a : Any = dx * dy
a : Dict = 0.04
a : List[Any] = self.window_size // 2
for y in range(lowerCAmelCase__ , h - offset ):
for x in range(lowerCAmelCase__ , w - offset ):
a : List[str] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a : List[str] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a : Any = (wxx * wyy) - (wxy**2)
a : int = wxx + wyy
a : List[str] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
a : int = HarrisCorner(0.04, 3)
a : str = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=a__ ):
lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : List[Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[Any] ="""gptsan-japanese"""
lowerCamelCase : Any =[
"""past_key_values""",
]
lowerCamelCase : Optional[Any] ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , lowerCAmelCase__=3_6000 , lowerCAmelCase__=1280 , lowerCAmelCase__=1024 , lowerCAmelCase__=8192 , lowerCAmelCase__=4096 , lowerCAmelCase__=128 , lowerCAmelCase__=10 , lowerCAmelCase__=0 , lowerCAmelCase__=16 , lowerCAmelCase__=16 , lowerCAmelCase__=128 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=False , lowerCAmelCase__=0.0 , lowerCAmelCase__="float32" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=0.002 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=3_5998 , lowerCAmelCase__=3_5995 , lowerCAmelCase__=3_5999 , **lowerCAmelCase__ , ) -> Optional[Any]:
a : Tuple = vocab_size
a : Any = max_position_embeddings
a : Tuple = d_model
a : Dict = d_ff
a : Tuple = d_ext
a : List[str] = d_spout
a : Dict = num_switch_layers
a : Any = num_ext_layers
a : Union[str, Any] = num_switch_layers + num_ext_layers
a : Dict = num_heads
a : List[Any] = num_experts
a : Dict = expert_capacity
a : Optional[int] = dropout_rate
a : int = layer_norm_epsilon
a : Union[str, Any] = router_bias
a : Any = router_jitter_noise
a : List[Any] = router_dtype
a : List[str] = router_ignore_padding_tokens
a : str = output_hidden_states
a : Any = output_attentions
a : Optional[int] = initializer_factor
a : Dict = output_router_logits
a : Tuple = use_cache
super().__init__(
separator_token_id=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
"""simple docstring"""
import qiskit
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->qiskit.result.counts.Counts:
'''simple docstring'''
a : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
a : Optional[Any] = qiskit.QuantumCircuit(_lowercase , _lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
a : Optional[int] = qiskit.execute(_lowercase , _lowercase , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase ( a__ ):
lowerCamelCase : int =["""pixel_values"""]
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 255 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> None:
super().__init__(**lowerCAmelCase__ )
a : List[Any] = size if size is not None else {"shortest_edge": 224}
a : Optional[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
a : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
a : str = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ , param_name="crop_size" )
a : Tuple = do_resize
a : Optional[int] = size
a : Tuple = resample
a : List[str] = do_center_crop
a : List[str] = crop_size
a : Tuple = do_rescale
a : Tuple = rescale_factor
a : int = do_normalize
a : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
a : str = do_convert_rgb
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
a : List[str] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
a : Optional[Any] = get_resize_output_image_size(lowerCAmelCase__ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
a : Tuple = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> PIL.Image.Image:
a : int = do_resize if do_resize is not None else self.do_resize
a : Union[str, Any] = size if size is not None else self.size
a : int = get_size_dict(lowerCAmelCase__ , param_name="size" , default_to_square=lowerCAmelCase__ )
a : Union[str, Any] = resample if resample is not None else self.resample
a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : str = crop_size if crop_size is not None else self.crop_size
a : Optional[int] = get_size_dict(lowerCAmelCase__ , param_name="crop_size" , default_to_square=lowerCAmelCase__ )
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : Dict = image_mean if image_mean is not None else self.image_mean
a : Optional[Any] = image_std if image_std is not None else self.image_std
a : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a : str = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a : Any = [convert_to_rgb(lowerCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
a : int = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
a : Dict = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
a : str = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
a : List[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
a : Optional[Any] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
a : List[str] = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
a : int = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
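# Usage sketch (illustrative, not runnable standalone because of the relative
# imports above): the processor maps a PIL image to a normalized pixel batch.
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor(images=Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)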
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ) ->Dict:
'''simple docstring'''
a : List[str] = 0
if start < end:
a : Tuple = randint(_lowercase , _lowercase )
a : List[str] = a[end]
a : str = a[pivot]
a : Optional[int] = temp
a, a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase )
count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 )
count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase )
return count
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] ) ->str:
'''simple docstring'''
a : Union[str, Any] = 0
a : List[Any] = randint(_lowercase , _lowercase )
a : int = a[end]
a : List[str] = a[pivot]
a : Tuple = temp
a : Union[str, Any] = start - 1
for index in range(_lowercase , _lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
a : List[str] = new_pivot_index + 1
a : Optional[int] = a[new_pivot_index]
a : Union[str, Any] = a[index]
a : List[Any] = temp
a : Tuple = a[new_pivot_index + 1]
a : str = a[end]
a : Dict = temp
return new_pivot_index + 1, count
a : int = TemporaryFile()
a : Tuple = 100 # 1000 elements are to be sorted
a , a : int = 0, 1 # mean and standard deviation
a : List[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a : int = np.load(outfile)
a : Tuple = len(M) - 1
a : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : int , _lowercase : int = 0 , _lowercase : int = 0 ) ->int:
'''simple docstring'''
a : int = right or len(_lowercase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(_lowercase , _lowercase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
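# Doctest-style usage sketch (illustrative, not part of the original file):
#
#     >>> search([1, 2, 4, 8, 16], 8)
#     3
#     >>> search([1, 2, 4, 8, 16], 5)
#     -1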
"""simple docstring"""
import baseaa
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes:
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : bytes ) ->str:
'''simple docstring'''
return baseaa.aaadecode(_lowercase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
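# Round-trip sketch (illustrative, not part of the original file): Ascii85
# encoding is reversible.
#
#     >>> base85_decode(base85_encode("Hello World!"))
#     'Hello World!'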
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =PhobertTokenizer
lowerCamelCase : Dict =False
def __a ( self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Optional[int] = ["T@@", "i", "I", "R@@", "r", "e@@"]
a : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
a : Any = ["#version: 0.2", "l à</w>"]
a : Dict = {"unk_token": "<unk>"}
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def __a ( self , **lowerCAmelCase__ ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> List[Any]:
a : Union[str, Any] = "Tôi là VinAI Research"
a : int = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def __a ( self ) -> Optional[Any]:
a : Tuple = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a : str = "Tôi là VinAI Research"
a : str = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
a : str = tokenizer.tokenize(lowerCAmelCase__ )
print(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : int = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Tuple = None
a : Any = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
a : str = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
a : Union[str, Any] = '''▁'''
class __UpperCamelCase ( a__ ):
lowerCamelCase : Union[str, Any] =VOCAB_FILES_NAMES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] =AlbertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
a : Optional[int] = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : Dict = do_lower_case
a : Any = remove_space
a : Optional[Any] = keep_accents
a : List[str] = vocab_file
a : Optional[Any] = False if not self.vocab_file else True
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
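# Usage sketch (illustrative, not part of the original file): [CLS]/[SEP] are
# added around a single sequence and between pairs.
#
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     tokenizer.build_inputs_with_special_tokens([5, 6])           # [CLS] 5 6 [SEP]
#     tokenizer.create_token_type_ids_from_sequences([5, 6], [7])  # [0, 0, 0, 0, 1, 1]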
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
a : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int=1.0 , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None ) ->Optional[Any]:
'''simple docstring'''
if rng is None:
a : Tuple = global_rng
a : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]:
a : int = parent
a : Tuple = batch_size
a : Dict = min_seq_length
a : Any = max_seq_length
a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a : Union[str, Any] = feature_size
a : Tuple = padding_value
a : str = sampling_rate
a : Dict = do_normalize
a : str = num_mel_bins
a : List[str] = hop_length
a : str = win_length
a : Optional[Any] = win_function
a : List[str] = fmin
a : Any = fmax
a : Optional[int] = mel_floor
a : Tuple = return_attention_mask
def __a ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict:
if equal_length:
a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Tuple =SpeechTaFeatureExtractor
def __a ( self ) -> Union[str, Any]:
a : Tuple = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : List[str] = range(800 , 1400 , 200 )
a : List[str] = [floats_list((1, x) )[0] for x in lengths]
a : Any = ["longest", "max_length", "do_not_pad"]
a : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
a : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Any = np.random.rand(100 ).astype(np.floataa )
a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a : List[Any] = np.asarray(lowerCAmelCase__ )
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> str:
a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Any = self.feature_extraction_class(**self.feat_extract_dict )
a : Union[str, Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
a : Optional[Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
a : Tuple = feat_extract.num_mel_bins # hack!
a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
a : Any = self.feat_extract_dict
a : Optional[Any] = True
a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : Any = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : int = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} )
a : Union[str, Any] = feat_extract.num_mel_bins # hack!
a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.feat_extract_dict
a : str = True
a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : Optional[Any] = feat_extract.model_input_names[0]
a : str = BatchFeature({input_name: speech_inputs} )
a : Optional[Any] = min(lowerCAmelCase__ )
a : List[Any] = feat_extract.num_mel_bins # hack!
a : Any = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : List[Any] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
a : List[str] = self._load_datasamples(1 )
a : Union[str, Any] = SpeechTaFeatureExtractor()
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Tuple = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
a : Dict = self._load_datasamples(1 )
a : Tuple = SpeechTaFeatureExtractor()
a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
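# Usage sketch (illustrative, not part of the test file): outside the tests, the
# extractor converts raw waveforms (or, with `audio_target=`, mel-spectrogram
# targets) into model-ready tensors:
#
#     extractor = SpeechT5FeatureExtractor()
#     inputs = extractor(raw_speech, sampling_rate=16000, return_tensors="pt")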
from scipy.stats import pearsonr
import datasets
a : Union[str, Any] = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
a : Union[str, Any] = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
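# Minimal sketch (added; not part of the original metric file): what _compute
# does under the hood, using scipy directly. Values match the docstring example.
if __name__ == "__main__":
    coeff, p_value = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print(round(float(coeff), 2))    # -0.74
    print(round(float(p_value), 2))  # 0.15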
| 720
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example) -> dict:
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
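# Illustrative toy example (added; not part of the original script): the
# `ratio_char_token` field above is just characters per token. With a toy
# whitespace "tokenizer" for demonstration:
#   text = "hello world"             # 11 characters
#   tokens = text.split()            # 2 tokens
#   ratio = len(text) / len(tokens)  # 5.5 characters per token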
| 31
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
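# Small illustrative note (added; not part of the original tests): the decoder
# attention mask built above always unmasks position 0 (the forced start token)
# and masks padding elsewhere. For example, with pad_token_id == 1:
#   decoder_input_ids = tf.constant([[2, 5, 1, 1]])
#   mask = tf.concat(
#       [tf.ones((1, 1), dtype=tf.int8),
#        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], 1), tf.int8)],
#       axis=-1,
#   )
#   # mask -> [[1, 1, 0, 0]]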
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
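# Usage sketch (added; not part of the original __init__): _LazyModule defers the
# heavy framework imports until an attribute is first touched, e.g.
#   from transformers import MT5Config                    # cheap, config only
#   from transformers import MT5ForConditionalGeneration  # first access pulls in torch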
| 31
| 0
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
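# Worked example (added): one mole of an ideal gas at 273.15 K in 0.0224 m^3 is
# close to one standard atmosphere.
if __name__ == "__main__":
    print(pressure_of_gas_system(1, 273.15, 0.0224))  # ~101388 Pa, i.e. ~1 atm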
| 700
|
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    '''simple docstring'''
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    '''simple docstring'''
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    '''simple docstring'''
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")
def main() -> None:
    '''simple docstring'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
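# Illustrative trace (added): the search above is plain bisection, so it needs
# O(log2(higher - lower)) guesses. Searching 42 in [0, 1000] visits:
#   500 -> 250 -> 125 -> 62 -> 31 -> 46 -> 38 -> 42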
| 31
| 0
|
"""simple docstring"""
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    '''simple docstring'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
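# Worked example (added): the second row is twice the first, so the rank is 1.
if __name__ == "__main__":
    print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # 1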
| 701
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: str, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return
        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs, ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
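# Usage sketch (added; not part of the original file). Loading an exported ONNX
# model from a local directory and running it on NumPy inputs; the file name
# "model.onnx" and the input key "sample" are illustrative assumptions.
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_dir", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))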
| 31
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(F"""{class_data_dir}/images""", exist_ok=True)
    if len(list(Path(F"""{class_data_dir}/images""").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(F"""{class_data_dir}/caption.txt""", "w") as f1, open(F"""{class_data_dir}/urls.txt""", "w") as f2, open(
        F"""{class_data_dir}/images.txt""", "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(F"""{class_data_dir}/images/{total}.jpg""", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 31
| 0
|
"""simple docstring"""
def sylvester(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int), F"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = F"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
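# First terms (added for reference): s(1) = 2 and s(n) = s(n-1)**2 - s(n-1) + 1,
# since lower * upper + 1 = (s-1) * s + 1.
if __name__ == "__main__":
    print([sylvester(i) for i in range(1, 6)])  # [2, 3, 7, 43, 1807]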
| 703
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    '''simple docstring'''
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(F"""Running {' '.join(cmd)}""")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
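# Illustrative sketch (added): for
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hi" --debug
# the assembled call is roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; echo hi" --worker all
# (the tpu name/zone above are made-up example values)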
| 31
| 0
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index_1: int, index_2: int, direction: int) -> None:
    '''simple docstring'''
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1], array[index_2] = array[index_2], array[index_1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 704
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
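# Usage sketch (added; not part of the original file): translating with the fast
# NLLB tokenizer, picking source/target languages via FLORES-200 codes.
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # pass tokenizer.lang_code_to_id["fra_Latn"] as forced_bos_token_id to generate()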
| 31
| 0
|
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    '''simple docstring'''
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    '''simple docstring'''
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    '''simple docstring'''
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    '''simple docstring'''
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    '''simple docstring'''
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
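# Worked examples (added): with number = 0b1101 == 13:
if __name__ == "__main__":
    print(set_bit(13, 1))     # 15 (0b1111)
    print(clear_bit(13, 2))   # 9  (0b1001)
    print(flip_bit(13, 1))    # 15 (0b1111)
    print(is_bit_set(13, 3))  # True
    print(get_bit(13, 1))     # 0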
| 705
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
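# Note (added): with N = num_inference_steps, the schedule entry for index i is
#     sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)),
# a geometric interpolation that equals sigma_max**2 at i = 0 and decays to
# sigma_min**2 at i = N - 1; sampling walks this noise schedule from the largest
# value down to the smallest, calling add_noise_to_input / step / step_correct
# at each timestep.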
| 31
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
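# Usage sketch (added; not part of the original file): nesting a vision config
# inside the text config and round-tripping through to_dict().
#   config = GitConfig(vision_config={"patch_size": 16})
#   assert config.to_dict()["vision_config"]["patch_size"] == 16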
| 706
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
def __a ( self , lowerCAmelCase__ ) -> Any:
a : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
a : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
a : Any = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
| 31
| 0
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __UpperCamelCase ( unittest.TestCase , a__ ):
def __a ( self ) -> Union[str, Any]:
a : int = load_tool("text-to-speech" )
self.tool.setup()
def __a ( self ) -> Optional[int]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
a : List[str] = self.tool("hey" )
a : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def __a ( self ) -> List[str]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
a : List[str] = self.tool("hey" )
a : int = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 707
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =IFPipeline
lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> List[str]:
return self._get_dummy_components()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict:
if str(lowerCAmelCase__ ).startswith("mps" ):
a : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> Optional[int]:
self._test_save_load_local()
def __a ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> Tuple:
# if
a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
a : str = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a : Optional[int] = None
a : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
a : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Union[str, Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# pipeline 1
_start_torch_memory_measurement()
a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : int = output.images[0]
assert image.shape == (256, 256, 3)
a : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[str] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : str = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 31
| 0
|
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase , model_file ):
    '''Load a serialized Flax checkpoint from `model_file` and copy its weights into the given PyTorch model.'''
try:
        with open(model_file , "rb" ) as flax_state_f:
            flax_state = from_bytes(_lowercase , flax_state_f.read() )
except UnpicklingError as e:
try:
            with open(model_file ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(_lowercase , flax_state )
def load_flax_weights_in_pytorch_model ( pt_model , flax_state ):
    '''Copy the weights of a Flax state dict into `pt_model`, transposing kernels where the two frameworks' layouts differ.'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , flax_state ) ).values()
    if any(is_type_bfaa ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , flax_state )
a : str = ""
    flax_state_dict = flatten_dict(flax_state , sep="." )
    pt_model_dict = pt_model.state_dict()
# keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
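        # Flax conv kernels are laid out (H, W, in, out) and dense kernels (in, out);
        # the branches below transpose them to PyTorch's (out, in, H, W) / (out, in)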
        flax_key_tuple_array = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
        flax_key = ".".join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
                missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
    if len(missing_keys ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
return pt_model
| 708
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : Dict = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Any = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> int:
# pass variant but use the non-variant filenames
a : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
a : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : Union[str, Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
# pass variant but use the non-variant filenames
a : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
| 31
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ) ->dict:
    '''Tokenize one example and record its character-to-token ratio.'''
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 709
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Tuple:
a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Optional[Any]:
a, a, a, a : List[str] = hidden_states.shape
a : List[Any] = jax.image.resize(
lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
a : List[str] = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Dict:
a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
a : Tuple = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : int =None
lowerCamelCase : float =0.0
lowerCamelCase : bool =None
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> int:
a : Dict = self.in_channels if self.out_channels is None else self.out_channels
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : Optional[int] = nn.Dropout(self.dropout_prob )
a : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a : List[str] = None
if use_nin_shortcut:
a : Optional[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str:
a : int = hidden_states
a : Tuple = self.norma(lowerCAmelCase__ )
a : Any = nn.swish(lowerCAmelCase__ )
a : int = self.conva(lowerCAmelCase__ )
a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
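        # broadcast the projected time embedding over the spatial (H, W) dims of the NHWC feature map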
a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
a : Dict = hidden_states + temb
a : str = self.norma(lowerCAmelCase__ )
a : List[Any] = nn.swish(lowerCAmelCase__ )
a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
a : Tuple = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
| 31
| 0
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder ( input_a : int = 1 , input_b : int = 1 , carry_in : int = 1 ) ->qiskit.result.counts.Counts:
    '''Simulate a quantum full adder on two input bits and a carry-in, returning the measured (sum, carry) counts.'''
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("inputs must be integers." )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive." )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("inputs must be exact integers." )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2." )
# build registers
    qr = qiskit.QuantumRegister(4 , "qr" )
    cr = qiskit.ClassicalRegister(2 , "cr" )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
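    # qubits 0-2 hold input_a, input_b and carry_in; qubit 3 accumulates the carry-out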
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator" )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 710
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser ( subparsers=None ):
    '''Build (or extend) the argument parser for the `accelerate test` command.'''
if subparsers is not None:
        parser = subparsers.add_parser("test" )
else:
        parser = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=_lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command )
return parser
def test_command ( args ):
    '''Run the bundled end-to-end test script via `accelerate-launch`.'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def main ( ):
    '''Entry point for running `accelerate test` directly.'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 31
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
a : str = logging.get_logger(__name__) # pylint: disable=invalid-name
a : Any = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : Union[PIL.Image.Image, np.ndarray]
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[int]:
super().__init__()
self.register_modules(
prior=lowerCAmelCase__ , image_encoder=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , renderer=lowerCAmelCase__ , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
if latents is None:
a : List[Any] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
a : Optional[int] = latents.to(lowerCAmelCase__ )
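        # scale by the scheduler's initial noise sigma so the latents match the noise level of the first timestep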
a : Dict = latents * scheduler.init_noise_sigma
return latents
def __a ( self , lowerCAmelCase__=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
a : Tuple = torch.device(f"""cuda:{gpu_id}""" )
a : Union[str, Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__ , lowerCAmelCase__ )
@property
def __a ( self ) -> int:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCAmelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> str:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(image[0] , torch.Tensor ):
a : Tuple = torch.cat(lowerCAmelCase__ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCAmelCase__ , axis=0 )
if not isinstance(lowerCAmelCase__ , torch.Tensor ):
a : Union[str, Any] = self.image_processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
a : List[Any] = image.to(dtype=self.image_encoder.dtype , device=lowerCAmelCase__ )
a : Dict = self.image_encoder(lowerCAmelCase__ )["last_hidden_state"]
a : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
a : Any = image_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 )
if do_classifier_free_guidance:
a : str = torch.zeros_like(lowerCAmelCase__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a : Dict = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 25 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = 64 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> List[Any]:
if isinstance(lowerCAmelCase__ , PIL.Image.Image ):
a : str = 1
elif isinstance(lowerCAmelCase__ , torch.Tensor ):
a : str = image.shape[0]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
a : Union[str, Any] = len(lowerCAmelCase__ )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase__ )}""" )
a : Tuple = self._execution_device
a : Optional[Any] = batch_size * num_images_per_prompt
a : int = guidance_scale > 1.0
a : int = self._encode_image(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# prior
self.scheduler.set_timesteps(lowerCAmelCase__ , device=lowerCAmelCase__ )
a : Any = self.scheduler.timesteps
a : str = self.prior.config.num_embeddings
a : Tuple = self.prior.config.embedding_dim
a : str = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
a : Tuple = latents.reshape(latents.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
a : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a : int = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a : str = self.prior(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , proj_embedding=lowerCAmelCase__ , ).predicted_image_embedding
# remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim
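            # classifier-free guidance: the batch stacks [unconditional, conditional] predictions,
            # and extrapolating between them strengthens conditioning on the input image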
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
a : List[str] = self.scheduler.step(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase__ )
a : Any = []
for i, latent in enumerate(lowerCAmelCase__ ):
a : Dict = self.renderer.decode(
latent[None, :] , lowerCAmelCase__ , size=lowerCAmelCase__ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowerCAmelCase__ )
a : int = torch.stack(lowerCAmelCase__ )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
a : Any = images.cpu().numpy()
if output_type == "pil":
a : Any = [self.numpy_to_pil(lowerCAmelCase__ ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase__ )
| 711
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule ( temperature : float , molar_mass : float ) ->float:
    '''Return the root-mean-square speed vrms = sqrt(3RT/M) for temperature T in kelvin and molar mass M in kg/mol.'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
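    # NOTE: 28 is the molar mass of N2 in g/mol, but the formula expects kg/mol (0.028),
    # so the printed value (~16.3 m/s) is sqrt(1000) times smaller than the physical ~517 m/s.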
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 31
| 0
|
"""simple docstring"""
import operator as op
def solve ( post_fix : list ) ->int:
    '''Evaluate a space-separated postfix expression, printing every stack operation as a table row.'''
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )
    return int(stack[0] )
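# e.g. solve(["5", "6", "9", "*", "+"]) prints the evaluation trace and returns 5 + 6 * 9 = 59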
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 712
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : str = 3
a : str = 250
a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def __a ( self ) -> List[Any]:
a, a : str = self._get_tensors(5 )
a : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : str = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = MaxLengthCriteria(max_length=10 )
a, a : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
a, a : str = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __a ( self ) -> str:
a, a : Tuple = self._get_tensors(5 )
a : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
| 31
| 0
|
"""simple docstring"""
from __future__ import annotations
def peak ( lst : list[int] ):
    '''Return a peak element of the unimodal list `lst` using divide and conquer.'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
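# e.g. peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) returns 5, using O(log n) comparisons on a unimodal list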
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
"""simple docstring"""
def solution ( pence : int = 200 ) ->int:
    '''Count the ways to make `pence` pence from the standard UK coin denominations (Project Euler 31).'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
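# e.g. solution(5) == 4: {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}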
if __name__ == "__main__":
assert solution(200) == 73682
| 31
| 0
|
"""simple docstring"""
def selection_sort ( collection : list ) ->list:
    '''Sort a mutable collection in ascending order, in place, by repeatedly selecting the minimum.'''
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
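# selection sort costs O(n^2) comparisons but performs at most n - 1 swaps,
# which can help when element writes are expensive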
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 714
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=a__ ):
lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 31
| 0
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =BertJapaneseTokenizer
lowerCamelCase : List[Any] =False
lowerCamelCase : List[Any] =True
def __a ( self ) -> List[Any]:
super().setUp()
a : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : Union[str, Any] = "こんにちは、世界。 \nこんばんは、世界。"
a : Any = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def __a ( self , lowerCAmelCase__ ) -> Any:
a : Tuple = self.get_input_output_texts(lowerCAmelCase__ )
a : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
return text, ids
def __a ( self ) -> str:
pass # TODO add if relevant
def __a ( self ) -> str:
pass # TODO add if relevant
def __a ( self ) -> str:
pass # TODO add if relevant
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.tokenizer_class(self.vocab_file )
a : Optional[int] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __a ( self ) -> Optional[Any]:
a : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(lowerCAmelCase__ )
a : int = "こんにちは、世界。\nこんばんは、世界。"
a : List[str] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : int = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCAmelCase__ , "wb" ) as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(lowerCAmelCase__ , "rb" ) as handle:
a : List[Any] = pickle.load(lowerCAmelCase__ )
a : Union[str, Any] = tokenizer_new.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Any = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ) -> str:
try:
a : List[str] = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ) -> Tuple:
try:
a : Union[str, Any] = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ) -> List[Any]:
a : int = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ) -> Union[str, Any]:
try:
a : List[str] = MecabTokenizer(
do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def __a ( self ) -> Optional[Any]:
a : int = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def __a ( self ) -> Union[str, Any]:
a : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(lowerCAmelCase__ )
a : int = "こんにちは、世界。\nこんばんは、世界。"
a : Dict = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Any = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCAmelCase__ , "wb" ) as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(lowerCAmelCase__ , "rb" ) as handle:
a : Any = pickle.load(lowerCAmelCase__ )
a : Optional[Any] = tokenizer_new.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_sudachi
def __a ( self ) -> Any:
a : Tuple = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __a ( self ) -> Dict:
a : int = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def __a ( self ) -> int:
a : int = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def __a ( self ) -> Dict:
a : Optional[Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def __a ( self ) -> Tuple:
a : Union[str, Any] = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __a ( self ) -> str:
a : List[Any] = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def __a ( self ) -> List[str]:
a : List[Any] = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def __a ( self ) -> Tuple:
a : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(lowerCAmelCase__ )
a : str = "こんにちは、世界。\nこんばんは、世界。"
a : List[str] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : List[Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCAmelCase__ , "wb" ) as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(lowerCAmelCase__ , "rb" ) as handle:
a : str = pickle.load(lowerCAmelCase__ )
a : List[str] = tokenizer_new.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_jumanpp
def __a ( self ) -> List[str]:
a : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __a ( self ) -> Union[str, Any]:
a : Optional[Any] = JumanppTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __a ( self ) -> Any:
a : List[Any] = JumanppTokenizer(normalize_text=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __a ( self ) -> Any:
a : Tuple = JumanppTokenizer(trim_whitespace=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def __a ( self ) -> Tuple:
a : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def __a ( self ) -> str:
a : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
a : Optional[Any] = {}
for i, token in enumerate(lowerCAmelCase__ ):
a : int = i
a : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def __a ( self ) -> int:
a : List[str] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
a : Tuple = tokenizer.subword_tokenizer
a : List[str] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(lowerCAmelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
a : Dict = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(lowerCAmelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def __a ( self ) -> Any:
a : int = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
a : List[Any] = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__ )
a : Union[str, Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__ )
a : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
a : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Any =BertJapaneseTokenizer
lowerCamelCase : Tuple =False
def __a ( self ) -> Tuple:
super().setUp()
a : Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __a ( self , **lowerCAmelCase__ ) -> str:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
a : List[str] = "こんにちは、世界。 \nこんばんは、世界。"
a : int = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def __a ( self ) -> int:
pass # TODO add if relevant
def __a ( self ) -> Optional[Any]:
pass # TODO add if relevant
def __a ( self ) -> Any:
pass # TODO add if relevant
def __a ( self ) -> Dict:
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            tokens , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __a ( self ) -> str:
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def __a ( self ) -> List[str]:
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        text = tokenizer.encode("ありがとう。" , add_special_tokens=False )
        text_2 = tokenizer.encode("どういたしまして。" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[str]:
        model_name = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(model_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[str]:
        model_name = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
        model_name = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int ) ->qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
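    # Hedged usage sketch (added, not from the original): the returned Counts
    # object behaves like a dict mapping bitstrings to frequencies, so the
    # measured distribution can be inspected directly.
    for state, frequency in single_qubit_measure(1, 1).items():
        print(f"state {state} observed {frequency} times out of 1000 shots")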
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman :
    def __init__( self , group = 14 ) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key( self ) -> str:
        return hex(self.__private_key )[2:]

    def generate_public_key( self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key( self , key ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key( self , other_key_str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("Invalid public key" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static( remote_public_key_str , prime ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static( local_private_key_str , remote_public_key_str , group = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
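    # Hedged usage sketch (added, not from the original): two parties derive
    # the same shared secret by exchanging hex-encoded public keys.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )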
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        # move the randomly chosen pivot to the end of the slice
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count


def _in_place_partition(a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
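
# Note (added): with a uniformly random pivot, the expected number of
# comparisons for quick sort is O(n log n) -- roughly 2 * n * ln(n), i.e. on
# the order of 900 for the 100 elements sorted above.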
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''

if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class __UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __a ( self ) -> Tuple:
super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab"] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["source_spm"] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["target_spm"] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , **lowerCAmelCase__ ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
return (
"This is a test",
"This is a test",
)
def __a ( self ) -> Any:
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __a ( self ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 9 )
def __a ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __a ( self ) -> List[str]:
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
        batch = en_de_tokenizer(["I am a small frog"] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("*" )]
        self.assertIn("source.spm" , contents )
        MarianTokenizer.from_pretrained(save_dir )
def __a ( self ) -> Union[str, Any]:
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
def __a ( self ) -> int:
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __a ( self ) -> Dict:
# fmt: off
a : List[Any] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def __a ( self ) -> int:
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(src_ids , expected_src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(target_ids , expected_target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
"""simple docstring"""
import base64


def base85_encode(string: str ) ->bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )


def base85_decode(a85encoded: bytes ) ->str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
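    # Hedged round-trip check (added; not part of the original file).
    sample = "Hello, Ascii85!"
    assert base85_decode(base85_encode(sample)) == sample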
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel :
    def __init__( self , model=None , **kwargs ) -> str:
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir" , None )
        self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )

    def __call__( self , **kwargs ) -> Dict:
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path , provider=None , sess_options=None ) -> Union[str, Any]:
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory , file_name = None , **kwargs ) -> int:
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs , ) -> str:
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs , ) -> Optional[int]:
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )

    @classmethod
    def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs , ) -> List[str]:
        revision = None
        if len(str(model_id ).split("@" ) ) == 2:
            model_id, revision = model_id.split("@" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
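
# Hedged usage sketch (added, not from the original). Assumes a local
# directory containing an exported `model.onnx`:
#
#   unet = OnnxRuntimeModel.from_pretrained("path/to/onnx_dir", file_name="model.onnx")
#   outputs = unet(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))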
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> Union[str, Any]:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
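
# Hedged usage sketch (added, not from the original): assuming this class is
# exposed as `AlbertTokenizerFast`, special-token handling works like
#
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tok.build_inputs_with_special_tokens([10, 11])  # -> [cls_id, 10, 11, sep_id]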
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> List[str]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> Dict:
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def __a ( self ) -> int:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> List[Any]:
        model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ) ->Optional[Any]:
    '''simple docstring'''
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
class SpeechTaFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6000 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=7600 , mel_floor=1E-10 , return_attention_mask=True , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Tuple:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ) -> Dict:
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class __UpperCamelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ) -> Union[str, Any]:
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ) -> Union[str, Any]:
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors="np" )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="max_length" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )

        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def __a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors="np" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def __a ( self ) -> str:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features , padding="longest" , return_tensors="np" )[input_name]
        input_pt = feat_extract.pad(processed_features , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float64 ).sum() - input_pt.numpy().astype(np.float64 ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed , padding="longest" , return_tensors="np" )
        self.assertIn("attention_mask" , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
def __a ( self ) -> Union[str, Any]:
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed , padding="max_length" , max_length=max_length , truncation=True , return_tensors="np" )
        self.assertIn("attention_mask" , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ) -> Optional[int]:
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 9_3680) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=DummyObject ):
    _backends = ["""transformers""", """torch""", """note_seq"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
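
# Illustrative note (added): instantiating this stub without `transformers`,
# `torch` and `note_seq` installed raises an ImportError from
# `requires_backends`; when those packages are available, the real class is
# imported instead of this placeholder.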
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
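
# Note (added): the characters-per-token ratio computed above is a rough
# measure of tokenizer compression; higher values mean more source characters
# are packed into each token.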
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester( ConfigTester ):
def __a ( self ) -> Optional[Any]:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "embed_dim" ) )
        self.parent.assertTrue(hasattr(config , "num_heads" ) )
class TFCvtModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ) -> Any:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : List[str] = TFCvtModel(config=lowerCAmelCase__ )
a : Optional[Any] = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
a : List[str] = (self.image_size, self.image_size)
        a, a : List[str] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
a : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
a : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Tuple = self.num_labels
a : Any = TFCvtForImageClassification(lowerCAmelCase__ )
a : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self ) -> Tuple:
a : List[str] = self.prepare_config_and_inputs()
        a, a, a : Optional[int] = config_and_inputs
a : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : List[str] =(TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase : List[Any] =(
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase : Optional[Any] =False
lowerCamelCase : List[str] =False
lowerCamelCase : Optional[Any] =False
lowerCamelCase : int =False
lowerCamelCase : Dict =False
def __a ( self ) -> Optional[int]:
a : Dict = TFCvtModelTester(self )
a : List[Any] = TFCvtConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __a ( self ) -> Optional[Any]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __a ( self ) -> Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __a ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __a ( self ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __a ( self ) -> List[Any]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __a ( self ) -> Optional[int]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __a ( self ) -> int:
a : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(lowerCAmelCase__ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __a ( self ) -> Any:
        a, a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = model_class(lowerCAmelCase__ )
a : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __a ( self ) -> int:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a : Union[str, Any] = model_class(lowerCAmelCase__ )
a : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Dict = outputs.hidden_states
a : Any = len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        a, a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[int] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> Any:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __a ( self ) -> str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = TFCvtModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->int:
'''simple docstring'''
a : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __a ( self ) -> Optional[Any]:
a : Union[str, Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
a : Any = self.default_image_processor
a : Any = prepare_img()
a : str = image_processor(images=lowerCAmelCase__ , return_tensors="tf" )
# forward pass
a : List[str] = model(**lowerCAmelCase__ )
# verify the logits
a : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
a : str = tf.constant([0.9_285, 0.9_015, -0.3_150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCAmelCase__ , atol=1E-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a : Union[str, Any] = TaTokenizerFast
a : Union[str, Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a : List[Any] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
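# Note (an addition): the _LazyModule indirection above defers heavy imports
# until first attribute access, so e.g.
#   from transformers.models.mt5 import MT5ForConditionalGeneration
# only triggers the torch code path when that name is actually resolved.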
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Any =DownBlockaD # noqa F405
lowerCamelCase : List[str] ="""down"""
def __a ( self ) -> List[str]:
a : Optional[Any] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Tuple =ResnetDownsampleBlockaD # noqa F405
lowerCamelCase : Tuple ="""down"""
def __a ( self ) -> Dict:
a : Any = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =AttnDownBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""down"""
def __a ( self ) -> Union[str, Any]:
a : int = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =CrossAttnDownBlockaD # noqa F405
lowerCamelCase : List[Any] ="""down"""
def __a ( self ) -> Dict:
        a, a : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
a : int = 32
return init_dict, inputs_dict
def __a ( self ) -> List[Any]:
a : str = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase : Union[str, Any] ="""down"""
@property
def __a ( self ) -> Dict:
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
        a, a : List[str] = super().prepare_init_args_and_inputs_for_common()
a : Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __a ( self ) -> List[Any]:
a : Optional[int] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Dict =SkipDownBlockaD # noqa F405
lowerCamelCase : int ="""down"""
@property
def __a ( self ) -> str:
return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : int = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =AttnSkipDownBlockaD # noqa F405
lowerCamelCase : Optional[Any] ="""down"""
@property
def __a ( self ) -> Optional[Any]:
return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : int = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =DownEncoderBlockaD # noqa F405
lowerCamelCase : Optional[Any] ="""down"""
@property
def __a ( self ) -> Optional[Any]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : List[Any] = {
"in_channels": 32,
"out_channels": 32,
}
a : int = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Tuple:
a : Optional[Any] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[Any] =AttnDownEncoderBlockaD # noqa F405
lowerCamelCase : str ="""down"""
@property
def __a ( self ) -> Tuple:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = {
"in_channels": 32,
"out_channels": 32,
}
a : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Optional[Any]:
a : Union[str, Any] = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : str =UNetMidBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""mid"""
def __a ( self ) -> Dict:
a : Optional[Any] = {
"in_channels": 32,
"temb_channels": 128,
}
a : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Tuple:
a : Tuple = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[str] =UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase : Optional[Any] ="""mid"""
def __a ( self ) -> int:
        a, a : Any = super().prepare_init_args_and_inputs_for_common()
a : List[str] = 32
return init_dict, inputs_dict
def __a ( self ) -> Optional[Any]:
a : Tuple = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase : Dict ="""mid"""
@property
def __a ( self ) -> Optional[int]:
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
        a, a : str = super().prepare_init_args_and_inputs_for_common()
a : str = 32
return init_dict, inputs_dict
def __a ( self ) -> Dict:
a : List[Any] = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =UpBlockaD # noqa F405
lowerCamelCase : Dict ="""up"""
@property
def __a ( self ) -> int:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Tuple = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[int] =ResnetUpsampleBlockaD # noqa F405
lowerCamelCase : Dict ="""up"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> int:
a : Tuple = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Dict =CrossAttnUpBlockaD # noqa F405
lowerCamelCase : str ="""up"""
@property
def __a ( self ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
        a, a : List[str] = super().prepare_init_args_and_inputs_for_common()
a : Optional[int] = 32
return init_dict, inputs_dict
def __a ( self ) -> Union[str, Any]:
a : Tuple = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : int =SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase : Any ="""up"""
@property
def __a ( self ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ , include_encoder_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Optional[int]:
        a, a : Any = super().prepare_init_args_and_inputs_for_common()
a : Tuple = 32
return init_dict, inputs_dict
def __a ( self ) -> Dict:
a : List[Any] = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : str =AttnUpBlockaD # noqa F405
lowerCamelCase : Any ="""up"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __a ( self ) -> Union[str, Any]:
a : Union[str, Any] = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : str =SkipUpBlockaD # noqa F405
lowerCamelCase : Dict ="""up"""
@property
def __a ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Dict = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : List[Any] =AttnSkipUpBlockaD # noqa F405
lowerCamelCase : Tuple ="""up"""
@property
def __a ( self ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __a ( self ) -> Tuple:
a : Optional[int] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =UpDecoderBlockaD # noqa F405
lowerCamelCase : Optional[int] ="""up"""
@property
def __a ( self ) -> Optional[Any]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Union[str, Any] = {"in_channels": 32, "out_channels": 32}
a : Dict = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Dict:
a : int = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(lowerCAmelCase__ )
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : int =AttnUpDecoderBlockaD # noqa F405
lowerCamelCase : int ="""up"""
@property
def __a ( self ) -> List[str]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Tuple = {"in_channels": 32, "out_channels": 32}
a : List[str] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> Optional[int]:
a : str = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(lowerCAmelCase__ )
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : int ) ->None:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )
    ), 'argument values must be of type "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(_lowercase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
a : Optional[Any] = lower
a : List[Any] = higher
a : Tuple = []
while True:
a : List[Any] = get_avg(_lowercase , _lowercase )
last_numbers.append(_lowercase )
if answer(_lowercase ) == "low":
a : Optional[int] = number
elif answer(_lowercase ) == "high":
a : Tuple = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
a : Tuple = int(input("Enter lower value : " ).strip() )
a : Dict = int(input("Enter high value : " ).strip() )
a : Optional[int] = int(input("Enter value to guess : " ).strip() )
guess_the_number(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
a : str = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ = 101 ) -> List[Any]:
a : Union[str, Any] = length
def __len__( self ) -> List[str]:
return self.length
def __getitem__( self , lowerCAmelCase__ ) -> int:
return i
class __UpperCamelCase :
def __call__( self , lowerCAmelCase__ ) -> int:
return {"input_ids": torch.tensor(lowerCAmelCase__ ), "labels": torch.tensor(lowerCAmelCase__ )}
class __UpperCamelCase ( nn.Module ):
def __init__( self ) -> int:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
a : Any = nn.Linear(120 , 80 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[str]:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __UpperCamelCase ( a__ ):
@require_torch_neuroncore
def __a ( self ) -> Union[str, Any]:
a : Any = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
a : str = self.get_auto_remove_tmp_dir()
a : Union[str, Any] = f"""--output_dir {output_dir}""".split()
a : Optional[int] = ["torchrun"] + distributed_args + args
execute_subprocess_async(lowerCAmelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __UpperCamelCase ( a__ ):
@require_torch_multi_gpu
def __a ( self ) -> List[str]:
a : Any = f"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
a : int = self.get_auto_remove_tmp_dir()
a : int = f"""--output_dir {output_dir}""".split()
a : Any = ["torchrun"] + distributed_args + args
execute_subprocess_async(lowerCAmelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
a : List[str] = HfArgumentParser((TrainingArguments,))
a : Tuple = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction, for instance)
for dataset_length in [101, 40, 7]:
a : List[Any] = DummyDataset(dataset_length)
def _SCREAMING_SNAKE_CASE ( _lowercase : EvalPrediction ) ->Dict:
'''simple docstring'''
a : List[str] = list(range(len(_lowercase ) ) )
a : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
a : List[str] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
a : Tuple = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a : int = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a : Optional[Any] = 2
a : Optional[int] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a : List[Any] = None
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a : Any = logging.get_logger(__name__)
a : Tuple = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
a : Optional[int] = model
a : int = kwargs.get("model_save_dir" , lowerCAmelCase__ )
a : Tuple = kwargs.get("latest_model_name" , lowerCAmelCase__ )
def __call__( self , **lowerCAmelCase__ ) -> Dict:
a : List[str] = {k: np.array(lowerCAmelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase__ , lowerCAmelCase__ )
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Union[str, Any]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
a : List[str] = "CPUExecutionProvider"
return ort.InferenceSession(lowerCAmelCase__ , providers=[provider] , sess_options=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int:
a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a : str = self.model_save_dir.joinpath(lowerCAmelCase__ )
if src_path.exists():
a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str:
if os.path.isfile(lowerCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase__ ):
a : Tuple = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
a : Tuple = Path(lowerCAmelCase__ )
# load model from hub
else:
# download model
a : Optional[Any] = hf_hub_download(
repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , )
a : Optional[int] = Path(lowerCAmelCase__ ).parent
a : List[Any] = Path(lowerCAmelCase__ ).name
a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
a : Any = None
if len(str(lowerCAmelCase__ ).split("@" ) ) == 2:
a, a : Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
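# Usage sketch (an addition; the path is hypothetical): load an exported ONNX
# component and run it with NumPy inputs, e.g.
#   unet = OnnxRuntimeModel.from_pretrained("./onnx/unet", provider="CPUExecutionProvider")
#   out = unet(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))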
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a : Optional[Any] = datasets.utils.logging.get_logger(__name__)
a : Union[str, Any] = ['''names''', '''prefix''']
a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a : Any = ['''encoding_errors''', '''on_bad_lines''']
a : List[str] = ['''date_format''']
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
lowerCamelCase : str =","
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[Union[int, List[int], str]] ="infer"
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None
lowerCamelCase : Optional[Union[List[int], List[str]]] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None
lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : bool =False
lowerCamelCase : Optional[Union[int, List[int]]] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[Union[str, List[str]]] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : bool =True
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ="."
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ='"'
lowerCamelCase : int =0
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : int =0
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : Optional[str] =None
lowerCamelCase : int =1_0000
lowerCamelCase : Optional[datasets.Features] =None
lowerCamelCase : Optional[str] ="strict"
lowerCamelCase : Literal["error", "warn", "skip"] ="error"
lowerCamelCase : Optional[str] =None
def __a ( self ) -> Dict:
if self.delimiter is not None:
a : int = self.delimiter
if self.column_names is not None:
a : Any = self.column_names
@property
def __a ( self ) -> List[str]:
a : Dict = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value;
        # some others are deprecated, and we likewise skip them when they are left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
lowerCamelCase : Union[str, Any] =CsvConfig
def __a ( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
a : Tuple = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Tuple = [files]
a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
a : int = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Any = [files]
a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def __a ( self , lowerCAmelCase__ ) -> pa.Table:
if self.config.features is not None:
a : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def __a ( self , lowerCAmelCase__ ) -> Any:
a : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
a : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
a : Any = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
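# Usage sketch (an addition): this builder is what backs the "csv" loader in
# `datasets`; pandas-style kwargs are forwarded through CsvConfig, e.g.
#   ds = load_dataset("csv", data_files="data.csv", sep=";")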
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Optional[Any] ) ->str:
'''simple docstring'''
a : Union[str, Any] = 1.5
a : List[str] = int(factor * num_class_images )
a : Optional[Any] = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=_lowercase )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
a : List[Any] = client.query(text=_lowercase )
if len(_lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
a : Optional[int] = int(factor * num_images )
a : str = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 , )
a : Optional[int] = 0
a : str = 0
a : Any = tqdm(desc="downloading real regularization images" , total=_lowercase )
with open(F"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(F"""{class_data_dir}/urls.txt""" , "w" ) as fa, open(
F"""{class_data_dir}/images.txt""" , "w" ) as fa:
while total < num_class_images:
a : Optional[Any] = class_images[count]
count += 1
try:
a : str = requests.get(images["url"] )
if img.status_code == 200:
a : int = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : Optional[int] = argparse.ArgumentParser("" , add_help=_lowercase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_lowercase , type=_lowercase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=_lowercase , type=_lowercase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=_lowercase )
return parser.parse_args()
if __name__ == "__main__":
a : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
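# Example invocation (an addition; the script name is hypothetical, the flags
# come from the parser above):
#   python retrieve_real_images.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200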
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : str ="""xlnet"""
lowerCamelCase : Dict =["""mems"""]
lowerCamelCase : Optional[int] ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCAmelCase__=3_2000 , lowerCAmelCase__=1024 , lowerCAmelCase__=24 , lowerCAmelCase__=16 , lowerCAmelCase__=4096 , lowerCAmelCase__="gelu" , lowerCAmelCase__=True , lowerCAmelCase__="bi" , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=-1 , lowerCAmelCase__=False , lowerCAmelCase__="last" , lowerCAmelCase__=True , lowerCAmelCase__="tanh" , lowerCAmelCase__=0.1 , lowerCAmelCase__=5 , lowerCAmelCase__=5 , lowerCAmelCase__=5 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , **lowerCAmelCase__ , ) -> Optional[int]:
a : Any = vocab_size
a : Tuple = d_model
a : Tuple = n_layer
a : Tuple = n_head
if d_model % n_head != 0:
raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
a : Tuple = d_model // n_head
a : Optional[Any] = ff_activation
a : List[Any] = d_inner
a : Any = untie_r
a : str = attn_type
a : List[Any] = initializer_range
a : Any = layer_norm_eps
a : List[Any] = dropout
a : List[Any] = mem_len
a : Optional[Any] = reuse_len
a : List[str] = bi_data
a : Dict = clamp_len
a : Any = same_length
a : int = summary_type
a : str = summary_use_proj
a : Tuple = summary_activation
a : Tuple = summary_last_dropout
a : List[Any] = start_n_top
a : Tuple = end_n_top
a : Dict = bos_token_id
a : List[Any] = pad_token_id
a : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , lowerCAmelCase__ , )
a : Union[str, Any] = kwargs["use_cache"]
a : Optional[Any] = use_mems_eval
a : Dict = use_mems_train
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __a ( self ) -> Union[str, Any]:
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __a ( self , lowerCAmelCase__ ) -> List[str]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
a : Optional[int] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def _SCREAMING_SNAKE_CASE ( _lowercase : Any=None ) ->Optional[Any]:
'''simple docstring'''
if subparsers is not None:
a : int = subparsers.add_parser("tpu-config" , description=_description )
else:
a : List[Any] = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
a : Dict = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=_lowercase , default=_lowercase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_lowercase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_lowercase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
a : Any = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_lowercase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Tuple:
'''simple docstring'''
a : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_lowercase ):
a : Optional[Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
a : int = defaults.command_file
if not args.command and defaults.commands is not None:
a : Union[str, Any] = defaults.commands
if not args.tpu_name:
a : int = defaults.tpu_name
if not args.tpu_zone:
a : Union[str, Any] = defaults.tpu_zone
if args.accelerate_version == "dev":
a : int = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
a : Optional[Any] = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , _lowercase ):
a : Optional[Any] = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
a : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _lowercase ):
a : Union[str, Any] = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a : Tuple = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
a : List[Any] = "; ".join(_lowercase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a : str = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(_lowercase )}""" )
return
subprocess.run(_lowercase )
print("Successfully setup pod." )
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : List[Any] = tpu_command_parser()
a : Optional[int] = parser.parse_args()
tpu_command_launcher(_lowercase )
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 50 ) ->int:
'''simple docstring'''
a : Dict = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
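# Cross-check sketch (an addition, not part of the original solution): the DP
# above is equivalent to the recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)
# for rows built from unit cells and tiles of length 2, 3 or 4.
def _naive_count(length: int) -> int:
    if length < 0:
        return 0
    if length <= 1:
        return 1
    # place a unit cell or a tile of length 2, 3 or 4 at the front of the row
    return sum(_naive_count(length - step) for step in (1, 2, 3, 4))
# For lengths 0..5 this yields 1, 1, 2, 4, 8, 15, matching the DP above.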
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a : Tuple = None
a : int = logging.get_logger(__name__)
a : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : Optional[int] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
a : int = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
a : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCamelCase ( a__ ):
lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES
lowerCamelCase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""]
lowerCamelCase : Union[str, Any] =NllbTokenizer
lowerCamelCase : List[int] =[]
lowerCamelCase : List[int] =[]
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
a : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
a : Optional[Any] = legacy_behaviour
super().__init__(
vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : int = vocab_file
a : Any = False if not self.vocab_file else True
a : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
a : str = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a : List[Any] = src_lang if src_lang is not None else "eng_Latn"
a : str = self.convert_tokens_to_ids(self._src_lang )
a : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __a ( self ) -> str:
return self._src_lang
@src_lang.setter
def __a ( self , lowerCAmelCase__ ) -> None:
a : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : str = [self.sep_token_id]
a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a : Dict = src_lang
a : int = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
a : Dict = self.convert_tokens_to_ids(lowerCAmelCase__ )
a : Any = tgt_lang_id
return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __a ( self , lowerCAmelCase__ ) -> None:
a : int = self.convert_tokens_to_ids(lowerCAmelCase__ )
if self.legacy_behaviour:
a : Tuple = []
a : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
a : int = [self.cur_lang_code]
a : int = [self.eos_token_id]
a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
a : Any = self.convert_ids_to_tokens(self.suffix_tokens )
a : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __a ( self , lowerCAmelCase__ ) -> None:
a : str = self.convert_tokens_to_ids(lowerCAmelCase__ )
if self.legacy_behaviour:
a : Optional[Any] = []
a : int = [self.eos_token_id, self.cur_lang_code]
else:
a : List[Any] = [self.cur_lang_code]
a : List[Any] = [self.eos_token_id]
a : int = self.convert_ids_to_tokens(self.prefix_tokens )
a : int = self.convert_ids_to_tokens(self.suffix_tokens )
a : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
a : Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
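# Hedged usage sketch for the tokenizer fragment above, assuming it is
# transformers' NllbTokenizerFast; the checkpoint name is illustrative.
# Setting `src_lang`/`tgt_lang` drives the language-code special tokens
# handled by the set_src_lang_special_tokens/set_tgt_lang_special_tokens
# logic shown above.
from transformers import NllbTokenizerFast

nllb_tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
nllb_batch = nllb_tok("Hello world", return_tensors="pt")
# The target language code can be forced as the first generated token:
forced_bos_id = nllb_tok.convert_tokens_to_ids("fra_Latn")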
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a : Optional[int] = '''src/transformers'''
a : List[str] = '''docs/source/en/tasks'''
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : int , _lowercase : int ) ->List[Any]:
'''simple docstring'''
with open(_lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
a : int = f.readlines()
# Find the start prompt.
a : Dict = 0
while not lines[start_index].startswith(_lowercase ):
start_index += 1
start_index += 1
a : int = start_index
while not lines[end_index].startswith(_lowercase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a : Any = direct_transformers_import(TRANSFORMERS_PATH)
a : List[Any] = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a : Optional[Any] = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[Any]:
'''simple docstring'''
a : str = TASK_GUIDE_TO_MODELS[task_guide]
a : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowercase , set() )
a : Optional[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : int=False ) ->Union[str, Any]:
'''simple docstring'''
a : str = _find_text_in_file(
filename=os.path.join(_lowercase , _lowercase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
a : Union[str, Any] = get_model_list_for_task(_lowercase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_lowercase , _lowercase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a : Dict = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
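# Worked mini-example of the windowing logic in the text-extraction helper
# above (illustrative data, traced by hand rather than run on a real file):
#
#   lines = ["keep\n", "<!--start-->\n", "\n", "line a\n", "line b\n", "\n", "<!--end-->\n"]
#
# The first loop stops at the start prompt (index 1) and advances once more;
# the second loop stops at the end prompt (index 6) and backs up once; the
# blank-line trimming then narrows the window to indices 3..5, so the helper
# returns ("line a\nline b\n", 3, 5, lines).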
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : torch.FloatTensor
lowerCamelCase : torch.FloatTensor
lowerCamelCase : Optional[torch.FloatTensor] =None
class __UpperCamelCase ( a__ , a__ ):
lowerCamelCase : Tuple =2
@register_to_config
def __init__( self , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 1.007 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 0.05 , lowerCAmelCase__ = 50 , ) -> Union[str, Any]:
# standard deviation of the initial noise distribution
a : Tuple = sigma_max
        # settable values
a : int = None
a : np.IntTensor = None
a : torch.FloatTensor = None # sigma(t_i)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> torch.FloatTensor:
return sample
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[str]:
a : List[Any] = num_inference_steps
a : List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
a : int = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
a : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
a : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa , device=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
a : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
a : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
a : Union[str, Any] = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase__ ).to(sample.device )
a : Any = sigma + gamma * sigma
a : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]:
a : Union[str, Any] = sample_hat + sigma_hat * model_output
a : Tuple = (sample_hat - pred_original_sample) / sigma_hat
a : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]:
a : Optional[int] = sample_prev + sigma_prev * model_output
a : str = (sample_prev - pred_original_sample) / sigma_prev
a : Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
raise NotImplementedError()
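# Minimal sampling-loop sketch for the scheduler above, assuming it is
# diffusers' KarrasVeScheduler (the obfuscated methods correspond to
# set_timesteps, add_noise_to_input, step and step_correct); `unet` and the
# preconditioning follow the KarrasVe pipeline and are illustrative here.
def karras_ve_sample(unet, scheduler, shape, num_inference_steps=50, generator=None):
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(shape, generator=generator) * scheduler.config.sigma_max
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # 1. temporarily raise the noise level (Karras et al., Algorithm 2)
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
        # 2. Euler step from sigma_hat down to sigma_prev
        model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
        step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
        # 3. optional second-order correction
        if sigma_prev != 0:
            model_output = (sigma_prev / 2) * unet((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
            step_output = scheduler.step_correct(
                model_output, sigma_hat, sigma_prev, sample_hat,
                step_output.prev_sample, step_output.derivative,
            )
        sample = step_output.prev_sample
    return sample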
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Optional[int]:
a : str = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
a : Dict = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
a : List[str] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
a : Union[str, Any] = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
a : Tuple = model(lowerCAmelCase__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , lowerCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowerCAmelCase__ , atol=1E-3 ) )
@slow
def __a ( self ) -> List[str]:
a : Optional[int] = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
a : List[str] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
a : List[Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
a : int = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
a : Tuple = model(lowerCAmelCase__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , lowerCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowerCAmelCase__ , atol=1E-3 ) )
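# Hedged provenance check for the hard-coded input ids above: they should be
# what the matching tokenizer produces for the commented sentence (same
# checkpoint names as in the tests).
from transformers import XLMRobertaTokenizer

xlmr_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
xlmr_ids = xlmr_tok("The dog is cute and lives in the garden house")["input_ids"]
# expected (hedged): [0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]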
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a : Optional[Any] = datasets.utils.logging.get_logger(__name__)
a : Union[str, Any] = ['''names''', '''prefix''']
a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a : Any = ['''encoding_errors''', '''on_bad_lines''']
a : List[str] = ['''date_format''']
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
lowerCamelCase : str =","
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[Union[int, List[int], str]] ="infer"
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None
lowerCamelCase : Optional[Union[List[int], List[str]]] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None
lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : bool =False
lowerCamelCase : Optional[Union[int, List[int]]] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[Union[str, List[str]]] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : bool =True
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ="."
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ='"'
lowerCamelCase : int =0
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : int =0
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : Optional[str] =None
lowerCamelCase : int =1_0000
lowerCamelCase : Optional[datasets.Features] =None
lowerCamelCase : Optional[str] ="strict"
lowerCamelCase : Literal["error", "warn", "skip"] ="error"
lowerCamelCase : Optional[str] =None
def __a ( self ) -> Dict:
if self.delimiter is not None:
a : int = self.delimiter
if self.column_names is not None:
a : Any = self.column_names
@property
def __a ( self ) -> List[str]:
a : Dict = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
        # some others are deprecated, so we also drop them when they equal the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
lowerCamelCase : Union[str, Any] =CsvConfig
def __a ( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
a : Tuple = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Tuple = [files]
a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
a : int = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Any = [files]
a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def __a ( self , lowerCAmelCase__ ) -> pa.Table:
if self.config.features is not None:
a : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def __a ( self , lowerCAmelCase__ ) -> Any:
a : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
a : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
a : Any = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
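# Hedged usage sketch: this ArrowBasedBuilder is what backs the packaged "csv"
# loader in the `datasets` library; the file path below is illustrative.
from datasets import load_dataset

csv_ds = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=",")
print(csv_ds["train"].features)  # schema inferred, or cast via `features=`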
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : Any = getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : str , _lowercase : str , _lowercase : int = 8 , _lowercase : int = 1024 , _lowercase : List[str]="val" , _lowercase : Union[str, Any]=None , _lowercase : str=False , _lowercase : Optional[Any]="summarization" , _lowercase : int=None , _lowercase : Any=1 , _lowercase : Dict = None , _lowercase : int="" , **_lowercase : str , ) ->Dict:
'''simple docstring'''
a : Optional[Any] = str(_lowercase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=_lowercase )
a : str = Path(_lowercase )
a : int = save_dir.joinpath(F"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(_lowercase )
a : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(_lowercase ).cuda()
if fpaa:
a : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_lowercase , _lowercase ) # update config with task specific params
a : Optional[int] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
a : Union[str, Any] = num_return_sequences
a : List[Any] = AutoTokenizer.from_pretrained(_lowercase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
a : Union[str, Any] = tokenizer.model_max_length
if prefix is None:
a : Optional[Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
a : List[str] = SeqaSeqDataset(
_lowercase , _lowercase , _lowercase , max_target_length=1024 , type_path=_lowercase , n_obs=_lowercase , prefix=_lowercase , **_lowercase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
a : Dict = ds.make_sortish_sampler(_lowercase , distributed=_lowercase , add_extra_examples=_lowercase , shuffle=_lowercase )
a : Optional[int] = DataLoader(_lowercase , sampler=_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn )
a : List[Any] = []
for batch in tqdm(_lowercase ):
a : List[str] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=_lowercase , num_beams=_lowercase , **_lowercase , )
a : Optional[Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
a : Tuple = batch["ids"]
if num_return_sequences > 1:
a : Union[str, Any] = chunks(_lowercase , _lowercase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_lowercase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(_lowercase , _lowercase )
return results, sampler.num_replicas
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : List[str] = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=_lowercase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=_lowercase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=_lowercase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=_lowercase , default=_lowercase )
parser.add_argument(
"--type_path" , type=_lowercase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=_lowercase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=_lowercase , default=8 , required=_lowercase , help="batch size" )
parser.add_argument(
"--local_rank" , type=_lowercase , default=-1 , required=_lowercase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=_lowercase , default=_lowercase , required=_lowercase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=_lowercase , default=1 , required=_lowercase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=_lowercase , default=600 , required=_lowercase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=_lowercase , default=_lowercase , required=_lowercase )
parser.add_argument("--tgt_lang" , type=_lowercase , default=_lowercase , required=_lowercase )
parser.add_argument(
"--prefix" , type=_lowercase , required=_lowercase , default=_lowercase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
a : str = time.time()
a : Any = parser.parse_known_args()
a : Tuple = parse_numeric_n_bool_cl_kwargs(_lowercase )
if generate_kwargs and args.local_rank <= 0:
print(F"""parsed the following generate kwargs: {generate_kwargs}""" )
a : str = Path(args.save_dir + "_tmp" )
Path(_lowercase ).mkdir(exist_ok=_lowercase ) # this handles locking.
a : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
a : str = {}
if args.src_lang is not None:
a : List[Any] = args.src_lang
if args.tgt_lang is not None:
a : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowercase )
a : Any = eval_data_dir(
args.data_dir , _lowercase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_lowercase , **_lowercase , )
if args.local_rank <= 0:
a : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowercase )
a : Union[str, Any] = gather_results_from_each_node(_lowercase , _lowercase , args.sync_timeout )
a : str = combine_partial_results(_lowercase )
if args.num_return_sequences > 1:
a : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(_lowercase , _lowercase )
return
a : Tuple = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(_lowercase ) as f:
a : str = [x.rstrip() for x in f.readlines()][: len(_lowercase )]
# Calculate metrics, save metrics, and save _generations.txt
a : Any = "translation" in args.task
a : Tuple = calculate_bleu if calc_bleu else calculate_rouge
a : Tuple = "bleu" if calc_bleu else "rouge"
a : Dict = score_fn(_lowercase , _lowercase )
a : Dict = len(_lowercase )
a : Optional[int] = time.time() - start_time
a : Tuple = round(runtime / metrics["n_obs"] , 4 )
a : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
a : Any = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""" )
save_json(_lowercase , _lowercase , indent=_lowercase )
print(_lowercase )
write_txt_file(_lowercase , save_dir.joinpath(F"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(_lowercase , save_dir.joinpath(F"""{args.type_path}.target""" ) )
else:
shutil.rmtree(_lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->List:
'''simple docstring'''
a : List[str] = []
for partial_result in partial_results:
records.extend(_lowercase )
a : str = sorted(_lowercase , key=lambda _lowercase : x["id"] )
a : List[Any] = [x["pred"] for x in records]
return preds
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : str ) ->List[Dict[str, List]]:
'''simple docstring'''
a : Optional[Any] = time.time()
logger.info("waiting for all nodes to finish" )
a : str = None
while (time.time() - start_wait) < timeout:
a : int = list(save_dir.glob("rank_*.json" ) )
if len(_lowercase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
a : Tuple = lmap(_lowercase , _lowercase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
    # Usage for MT: see the hedged launch sketch below.
run_generate()
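# Hedged launch sketch for the script above: it expects one process per GPU,
# with `--local_rank` injected by the launcher (script name, model and paths
# are illustrative):
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir cnn_dm --save_dir tmp_gen --bs 8 --type_path test
#
# Rank 0 then waits for every rank_*.json, merges them with
# combine_partial_results, and writes {type_path}_generations.txt plus metrics.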
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =IFPipeline
lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> List[str]:
return self._get_dummy_components()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict:
if str(lowerCAmelCase__ ).startswith("mps" ):
a : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> Optional[int]:
self._test_save_load_local()
def __a ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> Tuple:
        # text-to-image (IF base stage + super-resolution stage)
a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
a : str = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
        # pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a : Optional[int] = None
a : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
a : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Union[str, Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# pipeline 1
_start_torch_memory_measurement()
a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : int = output.images[0]
assert image.shape == (256, 256, 3)
a : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[str] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : str = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
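# Hedged two-stage usage sketch matching the flow exercised by the tests above
# (same checkpoints as the tests; torch.float16 spelled out for runnability):
import torch
from diffusers import IFPipeline, IFSuperResolutionPipeline

stage_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()
prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
stage_1_image = stage_1(
    prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt"
).images
stage_2 = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()
upscaled = stage_2(
    image=stage_1_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pil"
).images[0]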
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height should be > 0" )
a : str = img
a : Any = img.shape[1]
a : Union[str, Any] = img.shape[0]
a : str = dst_width
a : Optional[int] = dst_height
a : Union[str, Any] = self.src_w / self.dst_w
a : List[str] = self.src_h / self.dst_h
a : Tuple = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def __a ( self ) -> Dict:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
a : Any = self.img[self.get_y(lowerCAmelCase__ )][self.get_x(lowerCAmelCase__ )]
def __a ( self , lowerCAmelCase__ ) -> int:
return int(self.ratio_x * x )
def __a ( self , lowerCAmelCase__ ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
    a, a : Any = 800, 600
a : Union[str, Any] = imread('''image_data/lena.jpg''', 1)
a : List[Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
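# Pure-NumPy mini-check of the same index mapping (ratio = src/dst, source
# pixel picked by truncation), shown on a 4x4 -> 2x2 downscale:
src_demo = np.arange(16).reshape(4, 4)
ratio_demo = 4 / 2
out_demo = np.array(
    [[src_demo[int(ratio_demo * i)][int(ratio_demo * j)] for j in range(2)] for i in range(2)]
)
# out_demo == [[0, 2], [8, 10]]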
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : Dict = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Any = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> int:
# pass variant but use the non-variant filenames
a : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
a : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : Union[str, Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
# pass variant but use the non-variant filenames
a : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
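# The rule the tests above encode, stated once (hedged): a filename list is
# compatible only if every torch `.bin` weight has a `.safetensors` sibling in
# the same component, honoring the optional variant infix.
assert not is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])
assert is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)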
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Tuple:
a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Optional[Any]:
a, a, a, a : List[str] = hidden_states.shape
a : List[Any] = jax.image.resize(
lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
a : List[str] = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Dict:
a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
a : Tuple = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : int =None
lowerCamelCase : float =0.0
lowerCamelCase : bool =None
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> int:
a : Dict = self.in_channels if self.out_channels is None else self.out_channels
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : Optional[int] = nn.Dropout(self.dropout_prob )
a : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a : List[str] = None
if use_nin_shortcut:
a : Optional[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str:
a : int = hidden_states
a : Tuple = self.norma(lowerCAmelCase__ )
a : Any = nn.swish(lowerCAmelCase__ )
a : int = self.conva(lowerCAmelCase__ )
a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
a : Dict = hidden_states + temb
a : str = self.norma(lowerCAmelCase__ )
a : List[Any] = nn.swish(lowerCAmelCase__ )
a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
a : Tuple = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
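# Hedged init/apply sketch, assuming the three modules above are diffusers'
# FlaxUpsample2D, FlaxDownsample2D and FlaxResnetBlock2D (the obfuscated class
# names collide, so the public classes are used instead); tensors are NHWC.
from diffusers.models.resnet_flax import FlaxResnetBlock2D

res_block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
x_demo = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
temb_demo = jnp.zeros((1, 128))      # time embedding
params_demo = res_block.init(jax.random.PRNGKey(0), x_demo, temb_demo, True)
y_demo = res_block.apply(params_demo, x_demo, temb_demo, True)  # (1, 16, 16, 64)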
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : List[Any] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : int =DebertaVaTokenizer
lowerCamelCase : List[str] =DebertaVaTokenizerFast
lowerCamelCase : Tuple =True
lowerCamelCase : Any =True
def __a ( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
a : List[str] = DebertaVaTokenizer(lowerCAmelCase__ , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , lowerCAmelCase__ ) -> Tuple:
a : Tuple = "this is a test"
a : int = "this is a test"
return input_text, output_text
def __a ( self ) -> int:
a : Any = "<pad>"
a : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(lowerCAmelCase__ ) , 3_0001 )
def __a ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def __a ( self ) -> List[Any]:
# fmt: off
a : Tuple = " \tHeLLo!how \n Are yoU? "
a : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
a : List[str] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
a : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
a : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __a ( self ) -> List[Any]:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __a ( self ) -> List[str]:
pass
def __a ( self ) -> Any:
# fmt: off
a : Union[str, Any] = "I was born in 92000, and this is falsé."
a : str = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
a : Tuple = DebertaVaTokenizer(lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = DebertaVaTokenizerFast(lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Any:
# fmt: off
a : Any = "I was born in 92000, and this is falsé."
a : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
a : Optional[int] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Optional[int] = "I was born in 92000, and this is falsé."
a : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
a : Tuple = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Dict = "I was born in 92000, and this is falsé."
a : str = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
a : Optional[Any] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : str = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
# fmt: off
a : Dict = " \tHeLLo!how \n Are yoU? "
a : Optional[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
a : List[Any] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
a : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : List[str] = self.get_tokenizer()
a : Optional[Any] = self.get_rust_tokenizer()
a : Union[str, Any] = "I was born in 92000, and this is falsé."
a : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
a : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        rust_tokenizer = self.get_rust_tokenizer()
a : Dict = tokenizer.encode(lowerCAmelCase__ )
a : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Tuple = "This is a test"
a : Optional[int] = [13, 1, 4398, 25, 21, 1289]
a : Tuple = ["▁", "T", "his", "▁is", "▁a", "▁test"]
a : Optional[Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        tokenizer = DebertaVaTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
        rust_tokenizer = DebertaVaTokenizerFast(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
a : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : int = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# fmt: off
a : Dict = "I was born in 92000, and this is falsé."
a : Optional[Any] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
a : Optional[int] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
a : int = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
a : Optional[int] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : int = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : str = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Tuple:
        tokenizer = DebertaVaTokenizer(lowerCAmelCase__ )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def __a ( self ) -> List[str]:
# fmt: off
a : Any = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("test" )
    else:
        parser = argparse.ArgumentParser("Accelerate test command" )
    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser


def test_command(args ):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )


def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
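
# Usage sketch (an illustration, not part of the original CLI): `accelerate test
# --config_file my_config.yaml` expands to running the subprocess
# ["accelerate-launch", "--config_file=my_config.yaml", <path to test_script.py>].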
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
def __a ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Dict:
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
a : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a : Tuple = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(tokens , ["__java__", "__python__", "__en_XX__", "<mask>"] )
a : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
a : Optional[int] = tokenizer(lowerCAmelCase__ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) , lowerCAmelCase__ , )
def __a ( self ) -> Tuple:
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="multi" , keep_accents=True )
a : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            tokens , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
a : List[str] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
a : int = tokenizer(lowerCAmelCase__ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) , lowerCAmelCase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
    checkpoint_name = """uclanlp/plbart-python-en_XX"""
    src_text = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
    tgt_text = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __a ( cls ) -> List[Any]:
        cls.tokenizer : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        cls.pad_token_id = 1
return cls
def __a ( self ) -> List[str]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0003 )
def __a ( self ) -> int:
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def __a ( self ) -> int:
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __a ( self ) -> List[Any]:
a : Union[str, Any] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase__ )
a : Optional[Any] = 10
a : Tuple = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __a ( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0004, 5_0001] )
def __a ( self ) -> Tuple:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def __a ( self ) -> Optional[int]:
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __a ( self ) -> Dict:
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
        src_ids = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , src_ids )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __a ( self ) -> Any:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __a ( self ) -> List[Any]:
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 5_0003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0001,
} , )
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # ideal gas constant R, in J/(mol*K)


def rms_speed_of_molecule(temperature: float , molar_mass: float ) -> float:
    '''simple docstring'''
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen (N2) at 300 K; the molar mass must be in kg/mol for m/s output
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
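    # Cross-check (a sketch): v_rms = sqrt(3RT/M) = sqrt(3 * 8.3144598 * 300 / 0.028)
    # ≈ 516.95 m/s, consistent with tabulated values for nitrogen at room temperature.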
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
a : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    def _get_tensors(self , length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def __a ( self ) -> List[Any]:
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def __a ( self ) -> List[Any]:
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def __a ( self ) -> List[str]:
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def __a ( self ) -> str:
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
def __a ( self ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
"""simple docstring"""
import os
def largest_product(grid ):
    '''simple docstring'''
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    '''simple docstring'''
    grid = []
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as file:
        for line in file:
            grid.append(line.strip("\n" ).split(" " ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
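
# Quick check (a sketch, not part of the original solution): in a 4x4 grid of all
# ones every run of four multiplies to 1, so
# largest_product([[1] * 4 for _ in range(4)]) returns 1.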
"""simple docstring"""
def solution(pence: int = 200 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
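    # Small-case check (a sketch, not part of the original assertion): the four ways
    # to make 5 pence are [5], [2, 2, 1], [2, 1, 1, 1] and [1, 1, 1, 1, 1].
    assert solution(5) == 4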
"""simple docstring"""
import random


def _partition(data: list , pivot ) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater


def quick_select(items: list , index: int ):
    '''simple docstring'''
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller, equal, larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
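

if __name__ == "__main__":
    # Usage sketch (added for illustration): quick_select returns the index-th
    # smallest element (0-indexed) in expected linear time.
    assert quick_select([2, 4, 5, 7, 899, 54, 32] , 5 ) == 54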
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=a__ ):
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Any:
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )  # "J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring"""
import qiskit


def single_qubit_measure(qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
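
# Expected behaviour (a sketch): no gate is applied before measurement, so the qubit
# stays in |0> and all 1000 shots read '0', i.e. the printed counts are {'0': 1000}.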
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}

RESOURCE_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 514,
    '''ernie-m-large''': 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}
class __UpperCamelCase ( a__ ):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__="utf8" , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , vocab_file=lowerCAmelCase__ , encoding=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
a : List[str] = do_lower_case
a : Any = sentencepiece_model_ckpt
a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
a : Any = self.load_vocab(filepath=lowerCAmelCase__ )
else:
a : Dict = {self.sp_model.id_to_piece(lowerCAmelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
a : Any = {v: k for k, v in self.vocab.items()}
def __a ( self , lowerCAmelCase__ ) -> Any:
if text is None:
return None
a : List[Any] = self.tokenize(lowerCAmelCase__ )
a : Any = "", []
for i, ch in enumerate(lowerCAmelCase__ ):
if ch in self.SP_CHAR_MAPPING:
a : Any = self.SP_CHAR_MAPPING.get(lowerCAmelCase__ )
else:
a : Optional[Any] = unicodedata.normalize("NFKC" , lowerCAmelCase__ )
if self.is_whitespace(lowerCAmelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(lowerCAmelCase__ ) )
a : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
a : List[Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
a : Tuple = token[1:]
a : Any = text[offset:].index(lowerCAmelCase__ ) + offset
a : Union[str, Any] = start + len(lowerCAmelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
a : Any = end
return token_mapping
@property
def __a ( self ) -> Dict:
return len(self.vocab )
def __a ( self ) -> List[Any]:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> Optional[Any]:
a : Optional[int] = self.__dict__.copy()
a : Any = None
return state
def __setstate__( self , lowerCAmelCase__ ) -> Optional[int]:
a : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a : int = {}
a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __a ( self , lowerCAmelCase__ ) -> List[str]:
return "".join((self.SP_CHAR_MAPPING.get(lowerCAmelCase__ , lowerCAmelCase__ ) for c in text) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=64 , lowerCAmelCase__=0.1 ) -> List[Any]:
if self.sp_model_kwargs.get("enable_sampling" ) is True:
a : int = True
if self.sp_model_kwargs.get("alpha" ) is not None:
a : Any = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
a : str = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
a : int = self.sp_model.EncodeAsPieces(lowerCAmelCase__ )
else:
a : List[str] = self.sp_model.SampleEncodeAsPieces(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[Any] = []
for pi, piece in enumerate(lowerCAmelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowerCAmelCase__ ) and pi != 0:
new_pieces.append(lowerCAmelCase__ )
continue
else:
continue
a : Union[str, Any] = 0
for i, chunk in enumerate(lowerCAmelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowerCAmelCase__ ) or self.is_punct(lowerCAmelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowerCAmelCase__ )
a : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
a : Dict = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
a : Optional[Any] = i
if len(lowerCAmelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
a : Any = "".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , " " ).strip()
return out_string
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[Any] = self.convert_ids_to_tokens(lowerCAmelCase__ )
a : Optional[Any] = "".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , " " ).strip()
return out_string
def __a ( self , lowerCAmelCase__ ) -> str:
return self.vocab.get(lowerCAmelCase__ , self.vocab.get(self.unk_token ) )
def __a ( self , lowerCAmelCase__ ) -> str:
return self.reverse_vocab.get(lowerCAmelCase__ , self.unk_token )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Dict:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a : Tuple = [self.cls_token_id]
a : Any = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Any:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=False ) -> Tuple:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowerCAmelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowerCAmelCase__ ) + 1) + [1] * (len(lowerCAmelCase__ ) + 3)
def __a ( self , lowerCAmelCase__ ) -> List[str]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __a ( self , lowerCAmelCase__ ) -> str:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __a ( self , lowerCAmelCase__ ) -> Any:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __a ( self , lowerCAmelCase__ ) -> Dict:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowerCAmelCase__ ) == 1:
a : List[str] = unicodedata.category(lowerCAmelCase__ )
if cat == "Zs":
return True
return False
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : List[Any] = {}
with io.open(lowerCAmelCase__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(lowerCAmelCase__ ):
a : List[Any] = line.rstrip("\n" )
a : List[Any] = int(lowerCAmelCase__ )
return token_to_idx
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
a : List[str] = 0
if os.path.isdir(lowerCAmelCase__ ):
a : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
a : Optional[int] = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
a : str = token_index
writer.write(token + "\n" )
index += 1
a : Any = os.path.join(lowerCAmelCase__ , "sentencepiece.bpe.model" )
with open(lowerCAmelCase__ , "wb" ) as fi:
a : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (vocab_file,)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count


def _in_place_partition(a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
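
# Ballpark check (a sketch): randomised quicksort performs about 2 * n * ln(n)
# comparisons on average, so for n = 100 the printed count should sit near 921.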
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __UpperCamelCase :
lowerCamelCase : Tuple =None
def __a ( self ) -> Dict:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
def __a ( self ) -> Optional[Any]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self ) -> int:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self ) -> Tuple:
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
"""simple docstring"""
import base64


def base85_encode(string: str ) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )


def base85_decode(a85encoded: bytes ) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
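    # Round-trip sketch (added for illustration): Ascii85 packs every 4 bytes into
    # 5 printable characters, and decoding inverts encoding exactly.
    assert base85_decode(base85_encode("Hello World!" ) ) == "Hello World!"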
"""simple docstring"""
import math


def malus_law(initial_intensity: float , angle: float ) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
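    # Physical sanity checks (a sketch): a polariser aligned with the beam passes the
    # full intensity, while one at 90 degrees blocks essentially all of it.
    assert malus_law(100.0 , 0 ) == 100.0
    assert malus_law(100.0 , 90 ) < 1E-30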
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class __UpperCamelCase ( a__ ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
a : Optional[int] = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : Dict = do_lower_case
a : Any = remove_space
a : Optional[Any] = keep_accents
a : List[str] = vocab_file
a : Optional[Any] = False if not self.vocab_file else True
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
def combination_util(arr , n , r , index , data , i ):
    '''simple docstring'''
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr , n , r ):
    '''simple docstring'''
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
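    # Expected output (a sketch): the C(5, 3) == 10 three-element combinations, one
    # per line, from "10 20 30" through "30 40 50".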
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    # creates a random float32 tensor
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]:
a : int = parent
a : Tuple = batch_size
a : Dict = min_seq_length
a : Any = max_seq_length
a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a : Union[str, Any] = feature_size
a : Tuple = padding_value
a : str = sampling_rate
a : Dict = do_normalize
a : str = num_mel_bins
a : List[str] = hop_length
a : str = win_length
a : Optional[Any] = win_function
a : List[str] = fmin
a : Any = fmax
a : Optional[int] = mel_floor
a : Tuple = return_attention_mask
def __a ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict:
if equal_length:
a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Tuple =SpeechTaFeatureExtractor
def __a ( self ) -> Union[str, Any]:
a : Tuple = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : List[str] = range(800 , 1400 , 200 )
a : List[str] = [floats_list((1, x) )[0] for x in lengths]
a : Any = ["longest", "max_length", "do_not_pad"]
a : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
a : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Any = np.random.rand(100 ).astype(np.floataa )
a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a : List[Any] = np.asarray(lowerCAmelCase__ )
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> str:
a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Any = self.feature_extraction_class(**self.feat_extract_dict )
a : Union[str, Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
a : Optional[Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
a : Tuple = feat_extract.num_mel_bins # hack!
a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
a : Any = self.feat_extract_dict
a : Optional[Any] = True
a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : Any = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : int = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} )
a : Union[str, Any] = feat_extract.num_mel_bins # hack!
a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.feat_extract_dict
a : str = True
a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : Optional[Any] = feat_extract.model_input_names[0]
a : str = BatchFeature({input_name: speech_inputs} )
a : Optional[Any] = min(lowerCAmelCase__ )
a : List[Any] = feat_extract.num_mel_bins # hack!
a : Any = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : List[Any] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
a : List[str] = self._load_datasamples(1 )
a : Union[str, Any] = SpeechTaFeatureExtractor()
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Tuple = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
a : Dict = self._load_datasamples(1 )
a : Tuple = SpeechTaFeatureExtractor()
a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
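# For reference, a minimal sketch of the `_check_zero_mean_unit_variance` helper used
# throughout these tests (an assumption about its definition, which lives earlier in
# this file): it asserts per-feature normalization, roughly
#
#     def _check_zero_mean_unit_variance(self, input_vector):
#         self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
#         self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))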
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
a : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self , **lowerCAmelCase__ ) -> int:
requires_backends(self , ["bs4"] )
super().__init__(**lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : Optional[int] = []
a : Optional[Any] = []
a : Dict = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
a : List[str] = parent.find_all(child.name , recursive=lowerCAmelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(lowerCAmelCase__ ) else next(i for i, s in enumerate(lowerCAmelCase__ , 1 ) if s is child ) )
a : Optional[int] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __a ( self , lowerCAmelCase__ ) -> int:
a : str = BeautifulSoup(lowerCAmelCase__ , "html.parser" )
a : Optional[int] = []
a : List[Any] = []
a : List[str] = []
for element in html_code.descendants:
if type(lowerCAmelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
a : List[Any] = html.unescape(lowerCAmelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowerCAmelCase__ )
a : List[Any] = self.xpath_soup(lowerCAmelCase__ )
stringaxtag_seq.append(lowerCAmelCase__ )
stringaxsubs_seq.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : int = ""
for tagname, subs in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
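# Illustrative example (derived from the method above): with xpath_tags == ["html",
# "body", "div"] and xpath_subs == [0, 0, 1], construct_xpath returns
# "/html/body/div[1]"; a subscript of 0 means the tag is its parent's only child of
# that name, so no "[n]" index is emitted.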
def __call__( self , lowerCAmelCase__ ) -> BatchFeature:
a : List[Any] = False
# Check that strings has a valid type
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Tuple = True
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
if len(lowerCAmelCase__ ) == 0 or isinstance(html_strings[0] , lowerCAmelCase__ ):
a : int = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
f"""but is of type {type(lowerCAmelCase__ )}.""" )
a : Optional[Any] = bool(isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , lowerCAmelCase__ )) )
if not is_batched:
a : Dict = [html_strings]
# Get nodes + xpaths
a : Optional[Any] = []
a : Dict = []
for html_string in html_strings:
a : str = self.get_three_from_single(lowerCAmelCase__ )
nodes.append(lowerCAmelCase__ )
a : Dict = []
for node, tag_list, sub_list in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a : Union[str, Any] = self.construct_xpath(lowerCAmelCase__ , lowerCAmelCase__ )
xpath_strings.append(lowerCAmelCase__ )
xpaths.append(lowerCAmelCase__ )
# return as Dict
a : Optional[Any] = {"nodes": nodes, "xpaths": xpaths}
a : List[Any] = BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
return encoded_inputs
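# Minimal usage sketch for the feature extractor above (the concrete exported class
# name is an assumption; any instance of this class behaves the same):
#
#     feature_extractor = HTMLFeatureExtractor()  # hypothetical alias for the class above
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     encoding["nodes"]   # [["Hello world"]]
#     encoding["xpaths"]  # [["/html/body/p"]]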
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _SCREAMING_SNAKE_CASE ( _lowercase : dict ) ->dict:
    '''simple docstring'''
    a : dict = {}
    a["input_ids"] = tokenizer(_lowercase["content"] , truncation=False )["input_ids"]
    a["ratio_char_token"] = len(_lowercase["content"] ) / len(a["input_ids"] )
    return a
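# For intuition: a 1,000-character source file that tokenizes to 250 ids yields
# ratio_char_token == 4.0; higher ratios mean the tokenizer compresses the text better.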
a : int = HfArgumentParser(PretokenizationArguments)
a : Optional[int] = parser.parse_args()
if args.num_workers is None:
a : Tuple = multiprocessing.cpu_count()
a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a : Dict = time.time()
a : Tuple = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a : Dict = time.time()
a : Tuple = ds.map(
    _SCREAMING_SNAKE_CASE,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __UpperCamelCase ( datasets.BeamBasedBuilder ):
def __a ( self ) -> Any:
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowerCAmelCase__ , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase__ )
class __UpperCamelCase ( datasets.BeamBasedBuilder ):
def __a ( self ) -> str:
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowerCAmelCase__ , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class __UpperCamelCase ( a__ ):
@require_beam
def __a ( self ) -> Optional[int]:
a : List[Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a : Any = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
a : Optional[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self ) -> Union[str, Any]:
import apache_beam as beam
a : int = beam.io.parquetio.WriteToParquet
a : Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a : Dict = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
a : str = partial(lowerCAmelCase__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase__ , builder.name , "default" , "0.0.0" , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase__ , builder.name , "default" , "0.0.0" , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
a : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a : int = DummyBeamDataset(cache_dir=lowerCAmelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self ) -> Optional[Any]:
a : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a : int = NestedBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
a : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a : Union[str, Any] = TaTokenizerFast
a : Union[str, Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a : List[Any] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Any ="""efficientnet"""
def __init__( self , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 600 , lowerCAmelCase__ = 2.0 , lowerCAmelCase__ = 3.1 , lowerCAmelCase__ = 8 , lowerCAmelCase__ = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase__ = [32, 16, 24, 40, 80, 112, 192] , lowerCAmelCase__ = [16, 24, 40, 80, 112, 192, 320] , lowerCAmelCase__ = [] , lowerCAmelCase__ = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase__ = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase__ = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase__ = 0.25 , lowerCAmelCase__ = "swish" , lowerCAmelCase__ = 2560 , lowerCAmelCase__ = "mean" , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 0.001 , lowerCAmelCase__ = 0.99 , lowerCAmelCase__ = 0.5 , lowerCAmelCase__ = 0.2 , **lowerCAmelCase__ , ) -> Dict:
super().__init__(**lowerCAmelCase__ )
a : Optional[Any] = num_channels
a : List[str] = image_size
a : Any = width_coefficient
a : Optional[int] = depth_coefficient
a : Tuple = depth_divisor
a : Any = kernel_sizes
a : Any = in_channels
a : int = out_channels
a : List[Any] = depthwise_padding
a : Optional[Any] = strides
a : List[Any] = num_block_repeats
a : List[Any] = expand_ratios
a : List[str] = squeeze_expansion_ratio
a : List[Any] = hidden_act
a : List[Any] = hidden_dim
a : int = pooling_type
a : Any = initializer_range
a : List[Any] = batch_norm_eps
a : Dict = batch_norm_momentum
a : int = dropout_rate
a : List[str] = drop_connect_rate
a : Dict = sum(lowerCAmelCase__ ) * 4
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =version.parse("""1.11""" )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __a ( self ) -> float:
return 1E-5
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : int ) ->None:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(_lowercase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
a : Optional[Any] = lower
a : List[Any] = higher
a : Tuple = []
while True:
a : List[Any] = get_avg(_lowercase , _lowercase )
last_numbers.append(_lowercase )
if answer(_lowercase ) == "low":
a : Optional[int] = number
elif answer(_lowercase ) == "high":
a : Tuple = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
a : Tuple = int(input("Enter lower value : " ).strip() )
a : Dict = int(input("Enter high value : " ).strip() )
a : Optional[int] = int(input("Enter value to guess : " ).strip() )
guess_the_number(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
a : Tuple = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
a : Tuple = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
a : List[str] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
a : Tuple = F'''down_blocks.{i}.resnets.{j}.'''
a : Optional[int] = F'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
a : Dict = F'''down_blocks.{i}.attentions.{j}.'''
a : int = F'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
a : Dict = F'''up_blocks.{i}.resnets.{j}.'''
a : int = F'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
a : Any = F'''up_blocks.{i}.attentions.{j}.'''
a : Optional[int] = F'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
a : Dict = F'''down_blocks.{i}.downsamplers.0.conv.'''
a : int = F'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
a : Tuple = F'''up_blocks.{i}.upsamplers.0.'''
a : Optional[Any] = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
a : Optional[Any] = '''mid_block.attentions.0.'''
a : Optional[int] = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
a : List[str] = F'''mid_block.resnets.{j}.'''
a : Optional[Any] = F'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Any:
'''simple docstring'''
a : Optional[Any] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
a : str = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
a : str = v.replace(_lowercase , _lowercase )
a : Tuple = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
a : Any = v.replace(_lowercase , _lowercase )
a : Any = v
a : Any = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
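# Example renamings produced by the mapping above (HF Diffusers key -> SD key):
#   "down_blocks.0.resnets.0.norm1.weight"    -> "input_blocks.1.0.in_layers.0.weight"
#   "up_blocks.1.attentions.2.proj_in.weight" -> "output_blocks.5.1.proj_in.weight"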
# ================#
# VAE Conversion #
# ================#
a : Any = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
a : List[str] = F'''encoder.down_blocks.{i}.resnets.{j}.'''
a : Dict = F'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
a : int = F'''down_blocks.{i}.downsamplers.0.'''
a : List[Any] = F'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
a : Tuple = F'''up_blocks.{i}.upsamplers.0.'''
a : List[str] = F'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
a : Union[str, Any] = F'''decoder.up_blocks.{i}.resnets.{j}.'''
a : Optional[Any] = F'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
a : Dict = F'''mid_block.resnets.{i}.'''
a : Tuple = F'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
a : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->int:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
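# e.g. a (512, 512) linear attention weight is reshaped to (512, 512, 1, 1), the 1x1
# convolution layout the original SD VAE checkpoint uses for its attention
# q/k/v/proj_out parameters (512 is illustrative; any 2-D weight works).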
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
a : Optional[int] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
a : Tuple = v.replace(_lowercase , _lowercase )
a : int = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
a : int = v.replace(_lowercase , _lowercase )
a : int = v
a : Dict = {v: vae_state_dict[k] for k, v in mapping.items()}
a : int = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
a : Optional[int] = reshape_weight_for_sd(_lowercase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
a : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
a : Dict = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
a : str = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
a : Optional[int] = {'''q''': 0, '''k''': 1, '''v''': 2}
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->Union[str, Any]:
'''simple docstring'''
a : int = {}
a : Optional[Any] = {}
a : Dict = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
a : Dict = k[: -len(".q_proj.weight" )]
a : str = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
a : List[Any] = [None, None, None]
a : Union[str, Any] = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
a : str = k[: -len(".q_proj.bias" )]
a : Tuple = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
a : List[Any] = [None, None, None]
a : str = v
continue
a : str = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
a : Any = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
a : str = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
a : Dict = torch.cat(tensors )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
a : Optional[Any] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
a : str = torch.cat(tensors )
return new_state_dict
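# The torch.cat calls above stack the captured q, k, v projections into the single
# in_proj tensor the OpenCLIP text encoder expects; e.g. three (1024, 1024) weights
# become one (3072, 1024) in_proj_weight (shapes illustrative, ordering q, k, v).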
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights using safetensors; defaults to ckpt.'''
)
a : List[str] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
a : Any = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
a : Union[str, Any] = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
a : Dict = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load each model from safetensors if the file exists; otherwise fall back to the PyTorch .bin weights
if osp.exists(unet_path):
a : Dict = load_file(unet_path, device='''cpu''')
else:
a : Optional[Any] = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
a : int = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
a : Dict = load_file(vae_path, device='''cpu''')
else:
a : Any = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
a : str = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
a : str = load_file(text_enc_path, device='''cpu''')
else:
a : Union[str, Any] = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
a : Dict = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
a : Any = convert_unet_state_dict(unet_state_dict)
a : List[Any] = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
a : Optional[int] = convert_vae_state_dict(vae_state_dict)
a : Tuple = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
a : Optional[int] = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
a : int = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
a : List[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
a : Optional[int] = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
a : str = convert_text_enc_state_dict(text_enc_dict)
a : List[Any] = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
a : Any = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
a : Optional[Any] = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
a : str = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a : Any = logging.get_logger(__name__)
a : Tuple = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
a : Optional[int] = model
a : int = kwargs.get("model_save_dir" , lowerCAmelCase__ )
a : Tuple = kwargs.get("latest_model_name" , lowerCAmelCase__ )
def __call__( self , **lowerCAmelCase__ ) -> Dict:
a : List[str] = {k: np.array(lowerCAmelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase__ , lowerCAmelCase__ )
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Union[str, Any]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
a : List[str] = "CPUExecutionProvider"
return ort.InferenceSession(lowerCAmelCase__ , providers=[provider] , sess_options=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int:
a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a : str = self.model_save_dir.joinpath(lowerCAmelCase__ )
if src_path.exists():
a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str:
if os.path.isfile(lowerCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase__ ):
a : Tuple = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
a : Tuple = Path(lowerCAmelCase__ )
# load model from hub
else:
# download model
a : Optional[Any] = hf_hub_download(
repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , )
a : Optional[int] = Path(lowerCAmelCase__ ).parent
a : List[Any] = Path(lowerCAmelCase__ ).name
a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
a : Any = None
if len(str(lowerCAmelCase__ ).split("@" ) ) == 2:
a, a : Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
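# Minimal usage sketch (repo id and file name are illustrative):
#
#     model = OnnxRuntimeModel.from_pretrained(
#         "some-org/some-onnx-model", file_name="model.onnx", provider="CPUExecutionProvider"
#     )
#     outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))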
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[Any] =["""image_processor""", """tokenizer"""]
lowerCamelCase : Any ="""CLIPImageProcessor"""
lowerCamelCase : List[str] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Any:
a : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase__ , )
a : int = kwargs.pop("feature_extractor" )
a : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
a : Optional[Any] = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
a : str = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
a : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __a ( self ) -> Union[str, Any]:
a : List[str] = self.tokenizer.model_input_names
a : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
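# Minimal usage sketch (checkpoint name and exported class name are assumptions):
#
#     processor = SomeCLIPProcessor.from_pretrained("org/checkpoint")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> tokenizer output (input_ids, attention_mask) with pixel_values attached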
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Optional[Any] ) ->str:
'''simple docstring'''
a : Union[str, Any] = 1.5
a : List[str] = int(factor * num_class_images )
a : Optional[Any] = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=_lowercase )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
a : List[Any] = client.query(text=_lowercase )
if len(_lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
a : Optional[int] = int(factor * num_images )
a : str = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 , )
a : Optional[int] = 0
a : str = 0
a : Any = tqdm(desc="downloading real regularization images" , total=_lowercase )
with open(F"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(F"""{class_data_dir}/urls.txt""" , "w" ) as fa, open(
F"""{class_data_dir}/images.txt""" , "w" ) as fa:
while total < num_class_images:
a : Optional[Any] = class_images[count]
count += 1
try:
a : str = requests.get(images["url"] )
if img.status_code == 200:
a : int = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : Optional[int] = argparse.ArgumentParser("" , add_help=_lowercase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_lowercase , type=_lowercase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=_lowercase , type=_lowercase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=_lowercase )
return parser.parse_args()
if __name__ == "__main__":
a : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Any:
a : Union[str, Any] = size if size is not None else {"height": 18, "width": 18}
a : Optional[int] = parent
a : Any = batch_size
a : str = num_channels
a : List[Any] = image_size
a : Dict = min_resolution
a : int = max_resolution
a : Optional[int] = do_resize
a : List[str] = size
a : Union[str, Any] = apply_ocr
def __a ( self ) -> Any:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __a ( self ) -> Union[str, Any]:
a : Tuple = LayoutLMvaImageProcessingTester(self )
@property
def __a ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> int:
a : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "apply_ocr" ) )
def __a ( self ) -> Union[str, Any]:
a : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
a : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __a ( self ) -> Any:
pass
def __a ( self ) -> str:
# Initialize image_processing
a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a : Tuple = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase__ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase__ )
# Test batched
a : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __a ( self ) -> Union[str, Any]:
# Initialize image_processing
a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a : Dict = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __a ( self ) -> Optional[Any]:
# Initialize image_processing
a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __a ( self ) -> Union[str, Any]:
# with apply_OCR = True
a : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
a : int = Image.open(ds[0]["file"] ).convert("RGB" )
a : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase__ )
self.assertListEqual(encoding.boxes , lowerCAmelCase__ )
# with apply_OCR = False
a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ )
a : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
a : Optional[int] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser ( subparsers : Any=None ) ->Optional[Any]:
'''simple docstring'''
if subparsers is not None:
a : int = subparsers.add_parser("tpu-config" , description=_description )
else:
a : List[Any] = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
a : Dict = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=_lowercase , default=_lowercase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_lowercase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_lowercase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
a : Any = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_lowercase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher ( args : Any ) ->Tuple:
'''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
a : int = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
a : Optional[Any] = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , _lowercase ):
a : Optional[Any] = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file , "r" ) as f:
            args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a : Tuple = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
a : List[Any] = "; ".join(_lowercase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a : str = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(_lowercase )}""" )
return
    subprocess.run(cmd )
print("Successfully setup pod." )
def main ( ) ->Tuple:
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
"""simple docstring"""
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_point_a : Pointad , end_point_b : Pointad ) ->Vectorad:
    '''simple docstring'''
    x = end_point_b[0] - end_point_a[0]
    y = end_point_b[1] - end_point_a[1]
    z = end_point_b[2] - end_point_a[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ) ->Vectorad:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ) ->bool:
    '''simple docstring'''
    # a vector is treated as zero when every component rounds to 0 at the given accuracy
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 10 ) ->bool:
    '''simple docstring'''
    ab : Vectorad = create_vector(point_a , point_b )
    ac : Vectorad = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
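# Illustrative sanity check (example values chosen here): three points on the x-axis
# are collinear, so the cross product of AB and AC is the zero vector.
# >>> are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0))
# True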
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a : Tuple = None
a : int = logging.get_logger(__name__)
a : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : Optional[int] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
a : int = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
a : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
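# fmt: on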
class __UpperCamelCase ( a__ ):
lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES
lowerCamelCase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""]
lowerCamelCase : Union[str, Any] =NllbTokenizer
lowerCamelCase : List[int] =[]
lowerCamelCase : List[int] =[]
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ) -> Optional[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
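    # An NLLB sequence therefore has the format `[src_lang_code] X [eos]`
    # (or `X [eos, src_lang_code]` when `legacy_behaviour` is set).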
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> Any:
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
    def _switch_to_input_mode( self ) -> Tuple:
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> str:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
a : Any = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
a : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader ( path : str ) ->str:
    '''simple docstring'''
    with open(path , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
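# Converting to RGB guards against grayscale or RGBA inputs breaking the
# three-channel normalization applied by the torchvision transforms below.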
@dataclass
class __UpperCamelCase :
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCamelCase : Optional[str] =field(default=a__ , metadata={"""help""": """A folder containing the training data."""} )
lowerCamelCase : Optional[str] =field(default=a__ , metadata={"""help""": """A folder containing the validation data."""} )
lowerCamelCase : Optional[float] =field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowerCamelCase : Optional[int] =field(
default=a__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCamelCase : Optional[int] =field(
default=a__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self ) -> List[Any]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class __UpperCamelCase :
lowerCamelCase : str =field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a__ )} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowerCamelCase : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCamelCase : str =field(default=a__ , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCamelCase : bool =field(
default=a__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCamelCase : bool =field(
default=a__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn ( examples : Tuple ) ->Dict:
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main ( ) ->Any:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
a : List[str] = dataset["train"].features["labels"].names
a : Union[str, Any] = {}, {}
for i, label in enumerate(_lowercase ):
a : int = str(_lowercase )
a : List[Any] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
a : str = image_processor.size["shortest_edge"]
else:
a : List[str] = (image_processor.size["height"], image_processor.size["width"])
a : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
a : int = Compose(
[
RandomResizedCrop(_lowercase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
a : List[str] = Compose(
[
Resize(_lowercase ),
CenterCrop(_lowercase ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch ):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
        return example_batch
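    # These transform functions are applied lazily via `set_transform` below, so raw
    # images are only decoded and augmented when a batch is actually requested.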
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
# Set the training transforms
dataset["train"].set_transform(_lowercase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
dataset["validation"].set_transform(_lowercase )
# Initalize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : torch.FloatTensor
lowerCamelCase : torch.FloatTensor
lowerCamelCase : Optional[torch.FloatTensor] =None
class __UpperCamelCase ( a__ , a__ ):
lowerCamelCase : Tuple =2
@register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 100 , s_noise = 1.007 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ) -> Union[str, Any]:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps : int = None
        self.timesteps : np.IntTensor = None
        self.schedule : torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
return sample
    def set_timesteps( self , num_inference_steps , device = None ) -> List[str]:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
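        # The schedule interpolates geometrically between sigma_max**2 and sigma_min**2
        # over the reversed timesteps, following Karras et al. (2022).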
    def add_noise_to_input( self , sample , sigma , generator = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
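    # gamma implements the stochastic "churn" of Algorithm 2 in Karras et al. (2022):
    # the sample is pushed to a slightly higher noise level sigma_hat before the ODE step.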
    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
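    # `step` is the first-order (Euler) update; `step_correct` below applies the
    # second-order (Heun) correction using the mean of the two derivatives.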
    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps ) -> str:
raise NotImplementedError()
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution ( target : int = 200_0000 ) ->int:
    '''simple docstring'''
    triangle_numbers : list[int] = [0]
    idx : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
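    # Key fact: an a x b grid contains T(a) * T(b) sub-rectangles, where
    # T(n) = n * (n + 1) / 2 is the n-th triangle number, so the search below looks
    # for the pair of triangle numbers whose product is closest to the target.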
# we want this to be as close as possible to target
    best_product : int = 0
    # the area corresponding to the grid that gives the product closest to target
    area : int = 0
    # an estimate of b, using the quadratic formula
    b_estimate : float
    # the largest integer less than b_estimate
    b_floor : int
    # the smallest integer greater than b_estimate
    b_ceil : int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess : int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess : int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a : Optional[Any] = datasets.utils.logging.get_logger(__name__)
a : Union[str, Any] = ['''names''', '''prefix''']
a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a : Any = ['''encoding_errors''', '''on_bad_lines''']
a : List[str] = ['''date_format''']
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
lowerCamelCase : str =","
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[Union[int, List[int], str]] ="infer"
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None
lowerCamelCase : Optional[Union[List[int], List[str]]] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None
lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : bool =False
lowerCamelCase : Optional[Union[int, List[int]]] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[Union[str, List[str]]] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : bool =True
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ="."
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ='"'
lowerCamelCase : int =0
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : int =0
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : Optional[str] =None
lowerCamelCase : int =1_0000
lowerCamelCase : Optional[datasets.Features] =None
lowerCamelCase : Optional[str] ="strict"
lowerCamelCase : Literal["error", "warn", "skip"] ="error"
lowerCamelCase : Optional[str] =None
    def __post_init__( self ) -> Dict:
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ) -> List[str]:
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
lowerCamelCase : Union[str, Any] =CsvConfig
    def _info( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ) -> Optional[int]:
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ) -> Any:
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
def move_tower ( height : int , from_pole : str , to_pole : str , with_pole : str ) ->None:
'''simple docstring'''
if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
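# Note: the recursion above performs 2**height - 1 disk moves in total,
# e.g. a tower of height 3 is solved in 7 moves.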
def move_disk ( fpole : str , tpole : str ) ->None:
    '''simple docstring'''
    print("moving disk from" , fpole , "to" , tpole )
def main ( ) ->None:
    '''simple docstring'''
    height : int = int(input("Height of hanoi: " ).strip() )
    move_tower(height , "A" , "B" , "C" )
if __name__ == "__main__":
main()
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =IFPipeline
lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> List[str]:
return self._get_dummy_components()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict:
if str(lowerCAmelCase__ ).startswith("mps" ):
a : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> Optional[int]:
self._test_save_load_local()
def __a ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
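    # Explicitly freeing CUDA memory between tests keeps the peak-memory assertions
    # in the tests below independent of previously-run pipelines.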
def __a ( self ) -> Tuple:
# if
a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
a : str = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a : Optional[int] = None
a : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
a : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Union[str, Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# pipeline 1
_start_torch_memory_measurement()
a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : int = output.images[0]
assert image.shape == (256, 256, 3)
a : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[str] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : str = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
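# Resetting the peak-memory statistics between stages lets each memory assertion
# above measure only the pipeline invocation it follows.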
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
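# `_LazyModule` defers the actual imports until an attribute is first accessed,
# keeping `import transformers` fast even when optional backends are installed.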
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
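# `is_safetensors_compatible` checks that every `.bin` weight file in a repo listing
# has a matching `.safetensors` counterpart (optionally for a variant such as fp16).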
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : Dict = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Any = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> int:
# pass variant but use the non-variant filenames
a : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
a : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : Union[str, Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
# pass variant but use the non-variant filenames
a : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Any = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] ="""openai-gpt"""
lowerCamelCase : Tuple ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
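    # The attribute map aliases the canonical config names (e.g. `hidden_size`)
    # to the GPT-specific names (e.g. `n_embd`) used by this architecture.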
def __init__( self , lowerCAmelCase__=4_0478 , lowerCAmelCase__=512 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.02 , lowerCAmelCase__="cls_index" , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , **lowerCAmelCase__ , ) -> List[Any]:
a : Any = vocab_size
a : Union[str, Any] = n_positions
a : Optional[Any] = n_embd
a : Dict = n_layer
a : Dict = n_head
a : List[str] = afn
a : str = resid_pdrop
a : Any = embd_pdrop
a : List[str] = attn_pdrop
a : Dict = layer_norm_epsilon
a : List[str] = initializer_range
a : Union[str, Any] = summary_type
a : Union[str, Any] = summary_use_proj
a : Any = summary_activation
a : Any = summary_first_dropout
a : Union[str, Any] = summary_proj_to_labels
super().__init__(**lowerCAmelCase__ )
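# Instantiation sketch using the upstream class name (defaults mirror the
# signature above):
#
#   from transformers import OpenAIGPTConfig
#   config = OpenAIGPTConfig(vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12)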
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Tuple:
a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Optional[Any]:
a, a, a, a : List[str] = hidden_states.shape
a : List[Any] = jax.image.resize(
lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
a : List[str] = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Dict:
a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
a : Tuple = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : int =None
lowerCamelCase : float =0.0
lowerCamelCase : bool =None
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> int:
a : Dict = self.in_channels if self.out_channels is None else self.out_channels
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : Optional[int] = nn.Dropout(self.dropout_prob )
a : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a : List[str] = None
if use_nin_shortcut:
a : Optional[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str:
a : int = hidden_states
a : Tuple = self.norma(lowerCAmelCase__ )
a : Any = nn.swish(lowerCAmelCase__ )
a : int = self.conva(lowerCAmelCase__ )
a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
a : Dict = hidden_states + temb
a : str = self.norma(lowerCAmelCase__ )
a : List[Any] = nn.swish(lowerCAmelCase__ )
a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
a : Tuple = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =IFPipeline
lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> List[str]:
return self._get_dummy_components()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict:
if str(lowerCAmelCase__ ).startswith("mps" ):
a : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> Optional[int]:
self._test_save_load_local()
def __a ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> Tuple:
# text-to-image (base IF stage)
a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
a : str = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
# precompute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a : Optional[int] = None
a : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
a : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Union[str, Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# pipeline 1
_start_torch_memory_measurement()
a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : int = output.images[0]
assert image.shape == (256, 256, 3)
a : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[str] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : str = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
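# The helper above clears the cache and resets CUDA's peak-memory counters, so
# each torch.cuda.max_memory_allocated() assertion in the tests measures a
# single pipeline stage rather than the accumulated run.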
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _SCREAMING_SNAKE_CASE ( _lowercase : str=None ) ->Optional[Any]:
'''simple docstring'''
if subparsers is not None:
a : Dict = subparsers.add_parser("test" )
else:
a : Tuple = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=_lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str:
'''simple docstring'''
a : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
a : int = script_name
else:
a : int = F"""--config_file={args.config_file} {script_name}"""
a : Optional[int] = ["accelerate-launch"] + test_args.split()
a : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Any = test_command_parser()
a : Union[str, Any] = parser.parse_args()
test_command(_lowercase )
if __name__ == "__main__":
main()
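# Typical invocations of the subcommand wired up above (the config path is
# illustrative):
#
#   accelerate test
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# Both launch the bundled test_script.py through `accelerate-launch`; the second
# form pins the accelerate config file to use.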
"""simple docstring"""
from pathlib import Path
import fire
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str , _lowercase : int ) ->List[str]:
'''simple docstring'''
a : List[Any] = Path(_lowercase )
a : str = Path(_lowercase )
dest_dir.mkdir(exist_ok=_lowercase )
for path in src_dir.iterdir():
a : int = [x.rstrip() for x in list(path.open().readlines() )][:n]
a : Optional[Any] = dest_dir.joinpath(path.name )
print(_lowercase )
dest_path.open("w" ).write("\n".join(_lowercase ) )
if __name__ == "__main__":
fire.Fire(minify)
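# Example run (assuming the script is saved as minify.py): fire maps positional
# command-line arguments onto the function's parameters, so this copies the
# first 100 lines of every file in src/ into dest/:
#
#   python minify.py src/ dest/ 100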
"""simple docstring"""
a : str = 8.314_4598
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
a : Any = 300
a : Dict = 0.028 # molar mass of N2 in kg/mol, since molar_mass is expected in kg/mol
a : Dict = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
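# Sanity check of v_rms = sqrt(3RT/M): for N2 at 300 K with M = 0.028 kg/mol,
# sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s, matching tabulated values.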
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.02 , lowerCAmelCase__=None , lowerCAmelCase__=2 , ) -> Union[str, Any]:
a : Any = parent
a : List[Any] = batch_size
a : List[str] = image_size
a : str = patch_size
a : Union[str, Any] = num_channels
a : Optional[Any] = is_training
a : Optional[int] = use_labels
a : Optional[int] = hidden_size
a : List[Any] = num_hidden_layers
a : List[Any] = num_attention_heads
a : Tuple = intermediate_size
a : Optional[Any] = hidden_act
a : Dict = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : str = type_sequence_label_size
a : Union[str, Any] = initializer_range
a : Dict = scope
a : Tuple = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Tuple = (image_size // patch_size) ** 2
a : int = num_patches + 1
def __a ( self ) -> List[str]:
a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a : Union[str, Any] = None
if self.use_labels:
a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Any = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> List[Any]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : Union[str, Any] = ViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : str = ViTForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a : int = 1
a : List[str] = ViTForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
a : Optional[int] = self.type_sequence_label_size
a : str = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a : int = 1
a : Dict = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self ) -> Tuple:
a : List[Any] = self.prepare_config_and_inputs()
a, a, a : int = config_and_inputs
a : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : int =(
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase : Any =(
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple =True
lowerCamelCase : Optional[Any] =False
lowerCamelCase : List[Any] =False
lowerCamelCase : Union[str, Any] =False
def __a ( self ) -> int:
a : Optional[Any] = ViTModelTester(self )
a : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __a ( self ) -> Any:
pass
def __a ( self ) -> Optional[Any]:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Optional[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __a ( self ) -> List[str]:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Optional[int] = model_class(lowerCAmelCase__ )
a : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Tuple = [*signature.parameters.keys()]
a : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> Tuple:
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __a ( self ) -> List[str]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->Any:
'''simple docstring'''
a : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __a ( self ) -> str:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self ) -> Any:
a : Tuple = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCAmelCase__ )
a : str = self.default_image_processor
a : Optional[Any] = prepare_img()
a : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
a : str = model(**lowerCAmelCase__ )
# verify the logits
a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
a : Dict = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def __a ( self ) -> Optional[Any]:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
a : Optional[Any] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCAmelCase__ )
a : Union[str, Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
a : Optional[Any] = prepare_img()
a : Any = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
a : Dict = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
a : Union[str, Any] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__ )
# verify the logits
a : Dict = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
a : Optional[int] = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __a ( self ) -> Optional[int]:
a : Tuple = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
a : Optional[int] = self.default_image_processor
a : Tuple = prepare_img()
a : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
a : int = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Dict = model(lowerCAmelCase__ )
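# Note on the interpolate_pos_encoding test above: position embeddings trained
# at one resolution are interpolated at inference time, so the patch-8
# checkpoint fed a 480px image yields (480 / 8) ** 2 = 3600 patches, plus the
# CLS token, matching the asserted (1, 3601, 384) hidden-state shape.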
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : str = 3
a : str = 250
a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def __a ( self ) -> List[Any]:
a, a : str = self._get_tensors(5 )
a : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : str = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = MaxLengthCriteria(max_length=10 )
a, a : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
a, a : str = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __a ( self ) -> str:
a, a : Tuple = self._get_tensors(5 )
a : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
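# Usage sketch mirroring the cases above (values illustrative): criteria
# compose through StoppingCriteriaList, and generation halts once any single
# criterion returns True.
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1)]
#   )
#   done = criteria(input_ids, scores)  # True once length or elapsed time trips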
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=32 * 8 , lowerCAmelCase__=32 * 8 , lowerCAmelCase__=4 , lowerCAmelCase__=64 , ) -> Tuple:
a : List[str] = parent
a : Optional[int] = batch_size
a : str = is_training
a : List[str] = use_auxiliary_loss
a : Dict = num_queries
a : int = num_channels
a : int = min_size
a : Optional[Any] = max_size
a : str = num_labels
a : List[str] = hidden_dim
a : Optional[int] = hidden_dim
def __a ( self ) -> Union[str, Any]:
a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
a : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
a : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
a : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __a ( self ) -> Tuple:
a : str = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
a : Any = self.num_queries
a : str = self.num_labels
a : Union[str, Any] = [1, 1, 1, 1]
a : str = self.num_channels
a : List[Any] = 64
a : Any = 128
a : Optional[int] = self.hidden_dim
a : List[Any] = self.hidden_dim
a : Optional[Any] = self.hidden_dim
return config
def __a ( self ) -> Optional[int]:
a : List[str] = self.prepare_config_and_inputs()
a : int = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Tuple = output.encoder_hidden_states
a : Optional[int] = output.pixel_decoder_hidden_states
a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_layers )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[Any]:
with torch.no_grad():
a : List[Any] = MaskaFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Optional[int] = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
a : Union[str, Any] = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Any = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a : List[str] = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
a : Union[str, Any] = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
a : Tuple = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase : List[str] ={"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase : int =False
lowerCamelCase : Dict =False
lowerCamelCase : Any =False
lowerCamelCase : List[Any] =False
def __a ( self ) -> int:
a : str = MaskaFormerModelTester(self )
a : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __a ( self ) -> int:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> Any:
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def __a ( self ) -> Dict:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def __a ( self ) -> Any:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def __a ( self ) -> Any:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def __a ( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self ) -> List[str]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> int:
pass
def __a ( self ) -> int:
a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Optional[Any] = model_class(lowerCAmelCase__ )
a : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def __a ( self ) -> List[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a : Optional[Any] = MaskaFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Union[str, Any] = (self.model_tester.min_size,) * 2
a : List[str] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCAmelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(),
}
a : List[Any] = self.model_tester.get_config()
a : Union[str, Any] = MaskaFormerForUniversalSegmentation(lowerCAmelCase__ ).to(lowerCAmelCase__ )
a : List[Any] = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def __a ( self ) -> Optional[int]:
a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def __a ( self ) -> int:
a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
a : Union[str, Any] = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def __a ( self ) -> int:
if not self.model_tester.is_training:
return
a : Union[str, Any] = self.all_model_classes[1]
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
a : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
a : List[str] = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def __a ( self ) -> List[str]:
a : int = self.all_model_classes[1]
a : int = self.model_tester.prepare_config_and_inputs()
a : Tuple = True
a : int = True
a : Optional[int] = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
model.train()
a : int = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
a : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a : List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a : Union[str, Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : Tuple = 1E-4
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
a : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __a ( self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __a ( self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __a ( self ) -> str:
a : str = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ )
a : int = self.default_image_processor
a : Tuple = prepare_img()
a : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
a : Any = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
a : int = model(**lowerCAmelCase__ )
a : Union[str, Any] = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
a : Optional[int] = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
a : Dict = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __a ( self ) -> int:
a : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
a : List[str] = self.default_image_processor
a : Dict = prepare_img()
a : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
a : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
a : Optional[Any] = model(**lowerCAmelCase__ )
# masks_queries_logits
a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
a : Any = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
a : Any = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
a : Tuple = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
a : Union[str, Any] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
a : List[Any] = self.default_image_processor
a : Any = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
a : int = inputs["pixel_values"].to(lowerCAmelCase__ )
a : Union[str, Any] = [el.to(lowerCAmelCase__ ) for el in inputs["mask_labels"]]
a : str = [el.to(lowerCAmelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
a : Dict = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
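# Note on the shapes asserted above: mask logits come out at a quarter of the
# input resolution per spatial dimension (encoder compression), and class
# logits carry num_labels + 1 entries, the extra slot being the "no object"
# null class.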
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 200 ) ->int:
'''simple docstring'''
a : Dict = [1, 2, 5, 10, 20, 50, 100, 200]
a : Optional[Any] = [0] * (pence + 1)
a : List[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
    for i in range(coin , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
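    # Hand check of the DP on a small target: 5 pence decomposes over {1, 2, 5}
    # as 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5, i.e. four ways in total.
    assert solution(5) == 4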
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __UpperCamelCase ( a__ ):
lowerCamelCase : int ="""M-CLIP"""
def __init__( self , lowerCAmelCase__=1024 , lowerCAmelCase__=768 , **lowerCAmelCase__ ) -> Optional[Any]:
a : str = transformerDimSize
a : Any = imageDimSize
super().__init__(**lowerCAmelCase__ )
class __UpperCamelCase ( a__ ):
lowerCamelCase : str =MCLIPConfig
def __init__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
a : List[str] = XLMRobertaModel(lowerCAmelCase__ )
a : Union[str, Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : int = self.transformer(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
a : str = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowerCAmelCase__ ), embs
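# The forward above does a masked mean over token embeddings, then a linear
# projection. A standalone sketch of just the pooling (shapes illustrative,
# detached from the model):
#
#   embs = torch.randn(2, 4, 8)                      # (batch, seq, dim)
#   mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])
#   pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
#   # pooled has shape (2, 8); padded positions contribute nothing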
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=a__ ):
lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple =(KDPMaDiscreteScheduler,)
lowerCamelCase : Any =10
def __a ( self , **lowerCAmelCase__ ) -> Optional[Any]:
a : Optional[Any] = {
"num_train_timesteps": 1100,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase__ )
return config
def __a ( self ) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def __a ( self ) -> Dict:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Optional[Any] = self.scheduler_classes[0]
a : Dict = self.get_scheduler_config(prediction_type="v_prediction" )
a : Dict = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
a : List[str] = self.dummy_model()
a : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
a : Dict = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[Any] = output.prev_sample
a : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
a : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __a ( self ) -> Tuple:
if torch_device == "mps":
return
a : str = self.scheduler_classes[0]
a : str = self.get_scheduler_config()
a : Dict = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
a : Dict = self.dummy_model()
a : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a : Tuple = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a : Union[str, Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : int = output.prev_sample
a : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) )
a : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __a ( self ) -> Dict:
if torch_device == "mps":
return
a : Any = self.scheduler_classes[0]
a : Tuple = self.get_scheduler_config()
a : List[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
a : Optional[Any] = self.dummy_model()
a : int = self.dummy_sample_deter.to(lowerCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a : str = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[int] = model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = output.prev_sample
a : Dict = torch.sum(torch.abs(lowerCAmelCase__ ) )
a : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
if str(lowerCAmelCase__ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
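# The denoising loop pattern these tests exercise, in outline:
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample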
"""simple docstring"""
import qiskit
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->qiskit.result.counts.Counts:
'''simple docstring'''
a : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
a : Optional[Any] = qiskit.QuantumCircuit(_lowercase , _lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
a : Optional[int] = qiskit.execute(_lowercase , _lowercase , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
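# Note: the circuit above applies no gates before measuring, so the qubit stays
# in |0> and the counts collapse onto a single outcome, i.e. {'0': 1000} for
# the 1000 shots requested.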
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[Any] = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ) ->Dict:
'''simple docstring'''
a : List[str] = 0
if start < end:
a : Tuple = randint(_lowercase , _lowercase )
a : List[str] = a[end]
a : str = a[pivot]
a : Optional[int] = temp
a, a : Dict = _in_place_partition(_lowercase , _lowercase , _lowercase )
count += _in_place_quick_sort(_lowercase , _lowercase , p - 1 )
count += _in_place_quick_sort(_lowercase , p + 1 , _lowercase )
return count
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] ) ->str:
'''simple docstring'''
a : Union[str, Any] = 0
a : List[Any] = randint(_lowercase , _lowercase )
a : int = a[end]
a : List[str] = a[pivot]
a : Tuple = temp
a : Union[str, Any] = start - 1
for index in range(_lowercase , _lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
a : List[str] = new_pivot_index + 1
a : Optional[int] = a[new_pivot_index]
a : Union[str, Any] = a[index]
a : List[Any] = temp
a : Tuple = a[new_pivot_index + 1]
a : str = a[end]
a : Dict = temp
return new_pivot_index + 1, count
a : int = TemporaryFile()
a : Tuple = 100 # 100 elements are to be sorted
a , a : int = 0, 1 # mean and standard deviation
a : List[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a : int = np.load(outfile)
a : Tuple = len(M) - 1
a : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
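# Usage sketch on a plain list (the comparison count varies run to run because
# the pivot index is drawn at random):
#
#   data = [5, 1, 4, 2, 3]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   # data is now sorted in place; comparisons averages O(n log n)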
"""simple docstring"""
import numpy as np
def _SCREAMING_SNAKE_CASE ( _lowercase : np.array ) ->np.array:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
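# Quick check of the logistic above: 1 / (1 + exp(-0)) = 0.5, and large
# positive inputs saturate towards 1, e.g. 1 / (1 + exp(-10)) ≈ 0.99995.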
"""simple docstring"""
import baseaa
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes:
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : bytes ) ->str:
'''simple docstring'''
return baseaa.aaadecode(_lowercase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
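# Round-trip property of the encode/decode pair above: decoding the encoded
# bytes recovers the original string, i.e. decode(encode(s)) == s for any
# UTF-8 text s.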
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Tuple = None
a : Any = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
a : str = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
a : Union[str, Any] = '''▁'''
class __UpperCamelCase ( PreTrainedTokenizerFast ):
lowerCamelCase : Union[str, Any] =VOCAB_FILES_NAMES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] =AlbertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> Union[str, Any]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
a : Optional[int] = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : Dict = do_lower_case
a : Any = remove_space
a : Optional[Any] = keep_accents
a : List[str] = vocab_file
        a : Optional[Any] = bool(self.vocab_file )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
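# Editor's note (an addition): the two `__a` helpers above implement the
# standard ALBERT input layout. For one sequence the first returns
# [CLS] A [SEP]; for a pair it returns [CLS] A [SEP] B [SEP]. The second
# builds the matching token type ids: 0 for every position of the first
# segment (CLS, A, SEP) and 1 for the second (B, SEP), e.g.
#
#     tokens:   [CLS] a1 a2 [SEP] b1 b2 [SEP]
#     type ids:   0   0  0    0   1  1    1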
| 31
| 0
|
from collections.abc import Sequence
from queue import Queue
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[int]:
a : List[str] = start
a : Dict = end
a : Optional[int] = val
a : List[str] = (start + end) // 2
a : Any = left
a : str = right
def __repr__( self ) -> List[Any]:
return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Tuple = collection
a : Optional[int] = function
if self.collection:
a : int = self._build_tree(0 , len(lowerCAmelCase__ ) - 1 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
self._update_tree(self.root , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
return self._query_range(self.root , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
if start == end:
return SegmentTreeNode(lowerCAmelCase__ , lowerCAmelCase__ , self.collection[start] )
a : int = (start + end) // 2
a : List[str] = self._build_tree(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = self._build_tree(mid + 1 , lowerCAmelCase__ )
return SegmentTreeNode(lowerCAmelCase__ , lowerCAmelCase__ , self.fn(left.val , right.val ) , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
if node.start == i and node.end == i:
a : str = val
return
if i <= node.mid:
self._update_tree(node.left , lowerCAmelCase__ , lowerCAmelCase__ )
else:
self._update_tree(node.right , lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.fn(node.left.val , node.right.val )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowerCAmelCase__ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowerCAmelCase__ ) , )
else:
# range in right child tree
return self._query_range(node.right , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Any:
if self.root is not None:
a : Union[str, Any] = Queue()
queue.put(self.root )
while not queue.empty():
a : Any = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
a : List[Any] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
    print(arr.query_range(3, 4)) # add: 7, max: 4, min: 3
    print(arr.query_range(2, 2)) # 5 for all three functions
    print(arr.query_range(1, 3)) # add: 13, max: 5, min: 3
print()
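# Editor's note (an addition): `update` and `query_range` above each walk a
# single root-to-leaf path, so both run in O(log n) for an n-element
# collection, while `_build_tree` is O(n); `traverse` is a breadth-first walk.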
| 719
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
a : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int=1.0 , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None ) ->Optional[Any]:
'''simple docstring'''
if rng is None:
a : Tuple = global_rng
a : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]:
a : int = parent
a : Tuple = batch_size
a : Dict = min_seq_length
a : Any = max_seq_length
a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a : Union[str, Any] = feature_size
a : Tuple = padding_value
a : str = sampling_rate
a : Dict = do_normalize
a : str = num_mel_bins
a : List[str] = hop_length
a : str = win_length
a : Optional[Any] = win_function
a : List[str] = fmin
a : Any = fmax
a : Optional[int] = mel_floor
a : Tuple = return_attention_mask
def __a ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict:
if equal_length:
a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __UpperCamelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
lowerCamelCase : Tuple =SpeechTaFeatureExtractor
def __a ( self ) -> Union[str, Any]:
a : Tuple = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : List[str] = range(800 , 1400 , 200 )
a : List[str] = [floats_list((1, x) )[0] for x in lengths]
a : Any = ["longest", "max_length", "do_not_pad"]
a : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
a : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Any = np.random.rand(100 ).astype(np.floataa )
a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> Tuple:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a : List[Any] = np.asarray(lowerCAmelCase__ )
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> str:
a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Any = self.feature_extraction_class(**self.feat_extract_dict )
a : Union[str, Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
a : Optional[Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
a : Tuple = feat_extract.num_mel_bins # hack!
a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
a : Any = self.feat_extract_dict
a : Optional[Any] = True
a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : Any = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : int = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} )
a : Union[str, Any] = feat_extract.num_mel_bins # hack!
a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.feat_extract_dict
a : str = True
a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : Optional[Any] = feat_extract.model_input_names[0]
a : str = BatchFeature({input_name: speech_inputs} )
a : Optional[Any] = min(lowerCAmelCase__ )
a : List[Any] = feat_extract.num_mel_bins # hack!
a : Any = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : List[Any] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
a : List[str] = self._load_datasamples(1 )
a : Union[str, Any] = SpeechTaFeatureExtractor()
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Tuple = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
a : Dict = self._load_datasamples(1 )
a : Tuple = SpeechTaFeatureExtractor()
a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
| 31
| 0
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=64 , lowerCAmelCase__=None ) -> Optional[int]:
a : Optional[Any] = np.random.default_rng(lowerCAmelCase__ )
a : str = length
a : List[str] = rng.normal(size=(length,) ).astype(np.floataa )
a : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCAmelCase__ ) -> str:
return {"x": self.x[i], "y": self.y[i]}
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self , lowerCAmelCase__=0 , lowerCAmelCase__=0 , lowerCAmelCase__=False ) -> str:
super().__init__()
a : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
a : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
a : List[str] = True
def __a ( self , lowerCAmelCase__=None ) -> List[str]:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
a : Tuple = False
return x * self.a[0] + self.b[0]
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self , lowerCAmelCase__=0 , lowerCAmelCase__=0 , lowerCAmelCase__=False ) -> int:
super().__init__()
a : Tuple = torch.nn.Parameter(torch.tensor(lowerCAmelCase__ ).float() )
a : Any = torch.nn.Parameter(torch.tensor(lowerCAmelCase__ ).float() )
a : Optional[int] = True
def __a ( self , lowerCAmelCase__=None ) -> Tuple:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
a : Optional[Any] = False
return x * self.a + self.b
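# Editor's note (an addition): the two modules above compute the same affine
# map a * x + b; the first hard-codes 2-element parameter vectors and uses
# element 0, while the second builds its parameters from the constructor
# arguments. Both print their parameter and input dtypes once, on the first batch.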
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int = 16 ) ->Dict:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
a : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
a : Optional[int] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
a : str = load_dataset("csv" , data_files=_lowercase )
a : Dict = datasets["train"].unique("label" )
a : List[str] = {v: i for i, v in enumerate(_lowercase )}
def tokenize_function(_lowercase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
a : List[str] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowercase , max_length=_lowercase , padding="max_length" )
if "label" in examples:
a : Optional[Any] = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a : Any = datasets.map(
_lowercase , batched=_lowercase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowercase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowercase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
a : List[Any] = DataLoader(tokenized_datasets["train"] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=2 )
a : Optional[Any] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
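# Editor's usage sketch (an addition): `collate_fn` above closes over a free
# variable named `accelerator`, so one must exist in scope before the returned
# dataloaders are iterated. A minimal driver, assuming the `accelerate` library
# and that the first (obfuscated) parameter stands for the accelerator and the
# second for the batch size:
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     train_dl, eval_dl = _SCREAMING_SNAKE_CASE(accelerator, 16)
#     train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)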
| 720
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->int:
'''simple docstring'''
a : int = {}
a : Union[str, Any] = tokenizer(example["content"] , truncation=_lowercase )["input_ids"]
a : Any = len(example["content"] ) / len(output["input_ids"] )
return output
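# Editor's note (an addition): besides the token ids, the mapping function
# above records a characters-per-token ratio for each example -- a rough
# measure of how well the tokenizer compresses this corpus.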
a : int = HfArgumentParser(PretokenizationArguments)
a : Optional[int] = parser.parse_args()
if args.num_workers is None:
a : Tuple = multiprocessing.cpu_count()
a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a : Dict = time.time()
a : Tuple = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a : Dict = time.time()
a : Tuple = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 31
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_A : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int=1.0 , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None ) ->Optional[Any]:
'''simple docstring'''
if rng is None:
a : Tuple = global_rng
a : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]:
a : int = parent
a : Tuple = batch_size
a : Dict = min_seq_length
a : Any = max_seq_length
a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a : Union[str, Any] = feature_size
a : Tuple = padding_value
a : str = sampling_rate
a : Dict = do_normalize
a : str = num_mel_bins
a : List[str] = hop_length
a : str = win_length
a : Optional[Any] = win_function
a : List[str] = fmin
a : Any = fmax
a : Optional[int] = mel_floor
a : Tuple = return_attention_mask
def __a ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict:
if equal_length:
a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __UpperCamelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
lowerCamelCase : Tuple =SpeechTaFeatureExtractor
def __a ( self ) -> Union[str, Any]:
a : Tuple = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : List[str] = range(800 , 1400 , 200 )
a : List[str] = [floats_list((1, x) )[0] for x in lengths]
a : Any = ["longest", "max_length", "do_not_pad"]
a : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
a : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Any = np.random.rand(100 ).astype(np.floataa )
a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> Tuple:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a : List[Any] = np.asarray(lowerCAmelCase__ )
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> str:
a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Any = self.feature_extraction_class(**self.feat_extract_dict )
a : Union[str, Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
a : Optional[Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
a : Tuple = feat_extract.num_mel_bins # hack!
a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
a : Any = self.feat_extract_dict
a : Optional[Any] = True
a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : Any = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : int = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} )
a : Union[str, Any] = feat_extract.num_mel_bins # hack!
a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.feat_extract_dict
a : str = True
a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : Optional[Any] = feat_extract.model_input_names[0]
a : str = BatchFeature({input_name: speech_inputs} )
a : Optional[Any] = min(lowerCAmelCase__ )
a : List[Any] = feat_extract.num_mel_bins # hack!
a : Any = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : List[Any] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
a : List[str] = self._load_datasamples(1 )
a : Union[str, Any] = SpeechTaFeatureExtractor()
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Tuple = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
a : Dict = self._load_datasamples(1 )
a : Tuple = SpeechTaFeatureExtractor()
a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a : Union[str, Any] = TaTokenizerFast
a : Union[str, Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a : List[Any] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 31
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] ):
'''simple docstring'''
if not is_accelerate_available():
return method
a = version.parse(accelerate.__version__ ).base_version
    if version.parse(a ) < version.parse("0.17.0" ):
return method
def wrapper(self :List[Any] , *UpperCAmelCase__ :int , **UpperCAmelCase__ :int ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *UpperCAmelCase__ , **UpperCAmelCase__ )
return wrapper
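# Editor's usage sketch (an addition): the decorator above is a no-op on
# accelerate < 0.17.0; on newer versions it fires any attached `_hf_hook`'s
# `pre_forward` callback before the wrapped method runs. Hypothetical use:
#
#     class MyBlock(torch.nn.Module):
#         @UpperCAmelCase__   # the (obfuscated) decorator defined above
#         def forward(self, x):
#             return x * 2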
| 32
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
@cached_property
def A ( self : List[str] ) -> int:
"""simple docstring"""
a = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=a )
@slow
def A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.resolver.convert_models(["heb-eng"] )
@slow
def A ( self : Dict ) -> Any:
"""simple docstring"""
        a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 32
| 1
|
import heapq
import sys
import numpy as np
A_ : List[str] = tuple[int, int]
class _lowercase :
def __init__( self : Any ) -> Tuple:
"""simple docstring"""
a = []
a = set()
def A ( self : Optional[int] ) -> int:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def A ( self : List[str] ) -> str:
"""simple docstring"""
return len(self.elements ) == 0
def A ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ) -> int:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__lowerCAmelCase )
else:
# update
# print("update", item)
a = []
((a) , (a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((a) , (a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(__lowerCAmelCase )
a = []
((a) , (a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((a) , (a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def A ( self : int ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def A ( self : str ) -> str:
"""simple docstring"""
((a) , (a)) = heapq.heappop(self.elements )
self.set.remove(__lowerCAmelCase )
return (priority, item)
def UpperCAmelCase__ ( UpperCAmelCase__ :TPos , UpperCAmelCase__ :TPos ):
'''simple docstring'''
a = np.array(UpperCAmelCase__ )
a = np.array(UpperCAmelCase__ )
return np.linalg.norm(a - b )
def UpperCAmelCase__ ( UpperCAmelCase__ :TPos , UpperCAmelCase__ :TPos ):
'''simple docstring'''
return consistent_heuristic(UpperCAmelCase__ , UpperCAmelCase__ ) // t
def UpperCAmelCase__ ( UpperCAmelCase__ :TPos , UpperCAmelCase__ :TPos ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
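# Editor's note (an addition): the three functions above are, in order, a
# Euclidean-distance heuristic, the same value floor-divided by the global t,
# and a Manhattan-distance heuristic. For p = (0, 0) and goal = (3, 4), the
# Euclidean heuristic gives 5.0 and the Manhattan heuristic gives 7.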
def UpperCAmelCase__ ( UpperCAmelCase__ :TPos , UpperCAmelCase__ :int , UpperCAmelCase__ :TPos , UpperCAmelCase__ :dict[TPos, float] ):
'''simple docstring'''
a = g_function[start] + Wa * heuristics[i](UpperCAmelCase__ , UpperCAmelCase__ )
return ans
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Tuple ):
'''simple docstring'''
a = np.chararray((n, n) )
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
a = "*"
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if (j, (n - 1) - i) in blocks:
a = "#"
a = "-"
a = back_pointer[goal]
while x != start:
((a) , (a)) = x
# print(x)
a = "-"
a = back_pointer[x]
a = "-"
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
a = back_pointer[goal]
while x != start:
print(UpperCAmelCase__ , end=" " )
a = back_pointer[x]
print(UpperCAmelCase__ )
sys.exit()
def UpperCAmelCase__ ( UpperCAmelCase__ :TPos ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Any , UpperCAmelCase__ :Any , UpperCAmelCase__ :Any , UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Any , UpperCAmelCase__ :int , UpperCAmelCase__ :List[str] , ):
'''simple docstring'''
for itera in range(UpperCAmelCase__ ):
open_list[itera].remove_element(UpperCAmelCase__ )
# print("s", s)
# print("j", j)
((a) , (a)) = s
a = (x - 1, y)
a = (x + 1, y)
a = (x, y + 1)
a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCAmelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCAmelCase__ )
a = -1
a = float("inf" )
if valid(UpperCAmelCase__ ) and g_function[neighbours] > g_function[s] + 1:
a = g_function[s] + 1
a = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCAmelCase__ , key(UpperCAmelCase__ , 0 , UpperCAmelCase__ , UpperCAmelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCAmelCase__ ):
if key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) <= Wa * key(
UpperCAmelCase__ , 0 , UpperCAmelCase__ , UpperCAmelCase__ ):
open_list[j].put(
UpperCAmelCase__ , key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
A_ : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
A_ : Dict = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
A_ : int = make_common_ground()
A_ : Optional[int] = blocks_blk
# hyper parameters
A_ : str = 1
A_ : List[str] = 1
A_ : List[Any] = 20
A_ : List[Any] = 3 # one consistent and two other inconsistent
# start and end destination
A_ : Optional[Any] = (0, 0)
A_ : Union[str, Any] = (n - 1, n - 1)
A_ : Union[str, Any] = 1
def UpperCAmelCase__ ( UpperCAmelCase__ :TPos , UpperCAmelCase__ :TPos , UpperCAmelCase__ :int ):
'''simple docstring'''
a = {start: 0, goal: float("inf" )}
a = {start: -1, goal: -1}
a = []
a = set()
for i in range(UpperCAmelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCAmelCase__ , key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
a = []
a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , UpperCAmelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
                    a = open_list[i].top_show()
visited.add(UpperCAmelCase__ )
expand_state(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
close_list_inad.append(UpperCAmelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
a = open_list[0].top_show()
visited.add(UpperCAmelCase__ )
expand_state(
UpperCAmelCase__ , 0 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
close_list_anchor.append(UpperCAmelCase__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCAmelCase__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class _lowercase ( PretrainedConfig ):
_UpperCAmelCase = '''lilt'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = position_embedding_type
a = classifier_dropout
a = channel_shrink_ratio
a = max_ad_position_embeddings
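# Editor's usage sketch (an addition): the class above mirrors the LiLT
# configuration; instantiating it with keyword overrides follows the usual
# PretrainedConfig pattern, e.g. (names as obfuscated above):
#
#     config = _lowercase(hidden_size=512, num_hidden_layers=6)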
| 32
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
A_ : int = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for a given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
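# Usage sketch (hypothetical signature): these aliases keep annotations
# compact, e.g. `def load(path: PathLike) -> NestedDataStructureLike[str]: ...`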
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
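# Example (sketch): binary_and(25, 32) zero-pads both operands to six digits
# and returns "0b000000", since 0b011001 and 0b100000 share no set bits.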
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
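# The dynamic axes above tell the ONNX exporter which input dimensions may
# vary at runtime: batch and sequence always, plus the choice dimension for
# multiple-choice inputs.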
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
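# The palindrome checks below walk a singly linked list via `.val` and
# `.next`. The original file does not define the node type, so this is a
# minimal assumed definition.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next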
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
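# Note: is_palindrome reverses the second half in place, so it runs in O(1)
# extra space but leaves the input list modified.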
def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
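# Note: the stack variant trades O(n) extra space for leaving the list intact.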
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
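# Quick self-check (sketch): build small lists and make sure all three
# variants agree; a fresh list is built per call because is_palindrome
# mutates its input.
if __name__ == "__main__":

    def build_list(values):
        head = None
        for value in reversed(values):
            head = ListNode(value, head)
        return head

    for check in (is_palindrome, is_palindrome_stack, is_palindrome_dict):
        assert check(build_list([1, 2, 2, 1])) is True
        assert check(build_list([1, 2, 3])) is False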
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into the feature matrix and label vector.
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
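# Design note: the classifier above is left at the library defaults; main()
# below wires it to an Iris train/test split and plots a normalized confusion
# matrix.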
def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # Find instances where a non-binary file is opened without UTF-8 encoding.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        # Find instances where a python script file contains a `print` statement.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance, and impedance (the third passed
    as 0), solve for the missing value and return it as a name/value pair."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
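# Example (sketch): binary_insertion_sort([5, 2, 4, 6, 1, 3]) returns
# [1, 2, 3, 4, 5, 6]. Binary search cuts each insertion to O(log i)
# comparisons, but shifting elements keeps the worst case at O(n^2) moves.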
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        return sample
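    # `full_loop` runs a short 10-step denoising loop and returns the final
    # sample; the numeric checks below compare its mean against reference
    # values recorded for this scheduler.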
    def A ( self : Any ) -> int:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )

    def A ( self : List[str] ) -> Dict:
        """simple docstring"""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )

        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def A ( self : List[Any] ) -> Dict:
"""simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def A ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def A ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
a = self.full_loop(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def A ( self : Optional[int] ) -> Any:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def A ( self : Dict ) -> str:
"""simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def A ( self : Dict ) -> int:
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3

    def A ( self : Optional[int] ) -> int:
        """simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3

    def A ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        assert sample.dtype == torch.float16

    def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict:
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**__lowerCAmelCase )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : List[Any] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    model_type = '''rwkv'''
    attribute_map = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self , vocab_size=5_0277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
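# Minimal usage sketch (assuming this mirrors transformers' RwkvConfig; the
# class keeps the dataset's obfuscated name `_lowercase`):
#   config = _lowercase(context_length=2048)
#   assert config.intermediate_size == 4 * config.hidden_size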
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.0_2 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        """simple docstring"""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def A ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
    def A ( self : List[str] ) -> List[str]:
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def A ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def A ( self : List[Any] ) -> Any:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def A ( self : Dict ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def A ( self : Tuple ) -> List[Any]:
        """simple docstring"""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def A ( self : Tuple ) -> List[str]:
        """simple docstring"""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None

    @slow
    def A ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        """simple docstring"""
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )

    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )

    def A ( self : str ) -> Optional[int]:
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def A ( self : str ) -> Any:
"""simple docstring"""
pass
    def A ( self : Dict ) -> Dict:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def A ( self : Tuple ) -> str:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def A ( self : str ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def A ( self : List[Any] ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def A ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @slow
    def A ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A ( self : str ) -> List[str]:
        """simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )

    @slow
    @require_accelerate
    def A ( self : int ) -> int:
        """simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
| 32
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
    def __init__( self ):
        """simple docstring"""
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_image ):
        """simple docstring"""
        self.img = cva.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg" , self.img )

    def plot_histogram( self ):
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def show_image( self ):
        """simple docstring"""
        cva.imshow("Output-Image" , self.img )
        cva.imshow("Input-Image" , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
import argparse
import os
import re
PATH_TO_DIFFUSERS = '''src/diffusers'''

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'''\[([^\]]+)\]''')


def get_indent( UpperCAmelCase__ :Union[str, Any] ):
    '''simple docstring'''
    search = _re_indent.search(UpperCAmelCase__ )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ):
    '''simple docstring'''
    index = 0
    lines = code.split("\n" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["\n".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
                current_block.append(lines[index] )
                blocks.append("\n".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("\n".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("\n".join(lines[index:] ) )
    return blocks
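# Illustrative behaviour: everything before `start_prompt` becomes one block,
# and the code that follows is split into one block per statement at
# `indent_level`, stopping at `end_prompt`.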
def ignore_underscore( key ):
    '''simple docstring'''

    def _inner(UpperCAmelCase__ :Tuple ):
        return key(UpperCAmelCase__ ).lower().replace("_" , "" )

    return _inner
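# e.g. ignore_underscore(str)("_Foo_Bar") == "foobar", so underscores do not
# affect the sort order.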
def sort_objects( objects , key=None ):
    '''simple docstring'''

    def noop(x ):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
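# e.g. sort_objects(["foo", "CONSTANT", "Bar"]) -> ["CONSTANT", "Bar", "foo"]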
def sort_objects_in_import( import_statement ):
    '''simple docstring'''

    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"

    lines = import_statement.split("\n" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace , import_statement )
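# e.g. the one-line statement '_import_structure["models"].extend(["B", "A"])'
# is rewritten with its bracketed list sorted: '... .extend(["A", "B"])'.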
def sort_imports( file , check_only=True ):
    '''simple docstring'''
    with open(file , "r" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(file , "w" ) as f:
                f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , "__init__.py" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , "__init__.py" )]
    if len(failures ) > 0:
        raise ValueError(F"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
A_ : Dict = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet: UNetaDModel , scheduler: ScoreSdeVeScheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
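    # Sampling below follows the predictor-corrector scheme of score-based
    # generative models: each iteration applies `correct_steps` Langevin
    # corrector updates, then one predictor step of the reverse-time SDE.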
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2000 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample_mean = sample_mean.clamp(0 , 1 )
        sample = sample_mean.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class _lowercase ( FlaxModelTesterMixin, unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxRobertaModelTester(self )

    @slow
    def A ( self : Dict ) -> Any:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day( year: int , month: int , day: int ):
    '''simple docstring'''
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
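# Hand-checked example: 1 Jan 2000 fell on a Saturday, and
# get_week_day(2000, 1, 1) returns 'Saturday'.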
if __name__ == "__main__":
import doctest
doctest.testmod()
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    '''simple docstring'''
    if not isinstance(UpperCAmelCase__ , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_string = str(abs(UpperCAmelCase__ ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    task_type: Optional[str] = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )


@dataclass
class DataTrainingArguments :
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    labels: Optional[str] = field(
        default=None, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    max_seq_length: int = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
a = import_module("tasks" )
try:
a = getattr(UpperCAmelCase__ , model_args.task_type )
a = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
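    # Positions whose gold label equals nn.CrossEntropyLoss().ignore_index
    # (-100 by default) are skipped above, so padding and sub-word positions
    # do not affect the metrics.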
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , "test_predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , "w" ) as writer:
                with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = '''openai-gpt'''
_UpperCAmelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Dict , __lowerCAmelCase : Dict=4_0478 , __lowerCAmelCase : Optional[int]=512 , __lowerCAmelCase : int=768 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[int]=0.0_2 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=0.1 , **__lowerCAmelCase : Dict , ) -> List[Any]:
"""simple docstring"""
a = vocab_size
a = n_positions
a = n_embd
a = n_layer
a = n_head
a = afn
a = resid_pdrop
a = embd_pdrop
a = attn_pdrop
a = layer_norm_epsilon
a = initializer_range
a = summary_type
a = summary_use_proj
a = summary_activation
a = summary_first_dropout
a = summary_proj_to_labels
super().__init__(**__lowerCAmelCase )
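# Illustrative sketch (added, not part of the original file; it assumes the
# assignments above bind to `self` as in the upstream OpenAIGPTConfig):
# thanks to the attribute_map declared on the class, the GPT-style names and
# the generic names resolve to the same values.
# >>> config = _lowercase()
# >>> config.hidden_size == config.n_embd and config.num_hidden_layers == config.n_layer
# True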
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : List[Any] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = '''rwkv'''
_UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=5_0277 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Union[str, Any]=4096 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=1E-5 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=True , **__lowerCAmelCase : List[str] , ) -> List[Any]:
"""simple docstring"""
a = vocab_size
a = context_length
a = hidden_size
a = num_hidden_layers
a = attention_hidden_size if attention_hidden_size is not None else hidden_size
a = intermediate_size if intermediate_size is not None else 4 * hidden_size
a = layer_norm_epsilon
a = rescale_every
a = use_cache
a = bos_token_id
a = eos_token_id
super().__init__(
tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
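# Illustrative sketch (added; the keyword name follows the upstream RwkvConfig
# signature and is an assumption here): when `attention_hidden_size` and
# `intermediate_size` are left as None, they are derived from `hidden_size`
# (1x and 4x respectively), mirroring the fallbacks above.
# >>> config = _lowercase(hidden_size=768)
# >>> (config.attention_hidden_size, config.intermediate_size)
# (768, 3072)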
| 32
| 1
|
from numpy import exp, pi, sqrt
def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :float = 0.0 , UpperCAmelCase__ :float = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
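# Added sanity check: at x = mu the exponential term is 1, so the density
# reduces to 1 / sqrt(2 * pi * sigma**2) = 1 / sqrt(2 * pi) for sigma = 1.
# >>> round(float(UpperCAmelCase__(0.0)), 6)
# 0.398942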
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = ['''audio_values''', '''audio_mask''']
def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
a = spectrogram_length
a = num_channels
a = patch_size
a = feature_size // self.patch_size[1]
a = n_fft
a = sampling_rate // hop_length_to_sampling_rate
a = sampling_rate
a = padding_value
a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T
def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
"""simple docstring"""
a = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
a = log_spec[:, :-1]
a = log_spec - 2_0.0
a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
a = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowerCAmelCase ):
a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a = np.array(__lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a = padded_audio_features * self.padding_value
for i in range(len(__lowerCAmelCase ) ):
a = audio_features[i]
a = feature
# return as BatchFeature
if return_attention_mask:
a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
a = {"audio_values": padded_audio_features}
a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
return encoded_inputs
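# Illustrative call sketch (added; the keyword names follow the upstream TVLT
# feature extractor and are assumptions here, and the shape assumes the
# default 44.1 kHz / 128-bin configuration above):
# extractor = _lowercase()
# batch = extractor(
#     [np.zeros(44100, dtype=np.float32)],  # one second of silence
#     sampling_rate=44100,
#     return_tensors="np",
# )
# batch["audio_values"].shape  # -> (1, 1, max_time_len, 128)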
| 32
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = field(default='''automatic-speech-recognition''', metadata={'''include_in_asdict_even_if_is_default''': True} )
_UpperCAmelCase = Features({'''audio''': Audio()} )
_UpperCAmelCase = Features({'''transcription''': Value('''string''' )} )
_UpperCAmelCase = "audio"
_UpperCAmelCase = "transcription"
def A ( self : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __lowerCAmelCase ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
a = copy.deepcopy(self )
a = self.input_schema.copy()
a = features[self.audio_column]
a = input_schema
return task_template
@property
def A ( self : int ) -> Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
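# Illustrative sketch (added): the first method above (exposed upstream as
# `align_with_features`; here it carries the obfuscated name `A`) swaps the
# generic Audio() placeholder for the dataset's concrete audio feature, so
# the template's input schema matches the actual column.
# >>> features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
# >>> template = _lowercase().align_with_features(features)
# >>> template.input_schema["audio"].sampling_rate
# 16000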
| 32
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
"""simple docstring"""
a = parent
a = batch_size
a = is_training
a = use_auxiliary_loss
a = num_queries
a = num_channels
a = min_size
a = max_size
a = num_labels
a = mask_feature_size
def A ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
a = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
a = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A ( self : str ) -> Any:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def A ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
a , a , a , a , a = self.prepare_config_and_inputs()
a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
a = output.encoder_hidden_states
a = output.pixel_decoder_hidden_states
a = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
a = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
a = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
a = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def A ( self : List[str] ) -> List[Any]:
"""simple docstring"""
a = MaskFormerModelTester(self )
a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def A ( self : Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def A ( self : int ) -> int:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def A ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def A ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def A ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def A ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCAmelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def A ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def A ( self : str ) -> Dict:
"""simple docstring"""
a = (self.model_tester.min_size,) * 2
a = {
"pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
"class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
}
a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
a = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def A ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def A ( self : List[str] ) -> Any:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def A ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
a = self.all_model_classes[1]
a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def A ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a = self.all_model_classes[1]
a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
a = True
a = True
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A_ : int = 1E-4
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
@cached_property
def A ( self : int ) -> Optional[int]:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def A ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
a = self.default_image_processor
a = prepare_img()
a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
a = model(**__lowerCAmelCase )
a = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
a = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
a = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__lowerCAmelCase )
.eval()
)
a = self.default_image_processor
a = prepare_img()
a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
a = model(**__lowerCAmelCase )
# masks_queries_logits
a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
a = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A ( self : List[Any] ) -> Any:
"""simple docstring"""
a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(__lowerCAmelCase )
.eval()
)
a = self.default_image_processor
a = prepare_img()
a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
a = model(**__lowerCAmelCase )
# masks_queries_logits
a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
a = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A ( self : int ) -> Any:
"""simple docstring"""
a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__lowerCAmelCase )
.eval()
)
a = self.default_image_processor
a = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
a = inputs["pixel_values"].to(__lowerCAmelCase )
a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
a = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
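# Illustrative post-processing sketch (added; `post_process_semantic_segmentation`
# follows the upstream MaskFormerImageProcessor API and is an assumption here):
# processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
# inputs = processor(prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# seg_map = processor.post_process_semantic_segmentation(
#     outputs, target_sizes=[(480, 640)]
# )[0]  # a (480, 640) label map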
| 32
| 1
|
A_ : List[Any] = [
(10_00, '''M'''),
(9_00, '''CM'''),
(5_00, '''D'''),
(4_00, '''CD'''),
(1_00, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
'''simple docstring'''
a = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
a = 0
a = 0
while place < len(UpperCAmelCase__ ):
if (place + 1 < len(UpperCAmelCase__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
'''simple docstring'''
a = []
for arabic, roman in ROMAN:
((a) , (a)) = divmod(UpperCAmelCase__ , UpperCAmelCase__ )
result.append(roman * factor )
if number == 0:
break
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
def A ( self : Union[str, Any] ) -> int:
"""simple docstring"""
a = [[1, 2, 4], [1, 2, 3, 4]]
a = DisjunctiveConstraint(__lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def A ( self : Tuple ) -> Dict:
"""simple docstring"""
a = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowerCAmelCase ):
DisjunctiveConstraint(__lowerCAmelCase ) # fails here
def A ( self : int ) -> Any:
"""simple docstring"""
a = [[1, 2, 3], [1, 2, 4]]
a = DisjunctiveConstraint(__lowerCAmelCase )
a , a , a = dc.update(1 )
a = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a = dc.update(2 )
a = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a = dc.update(3 )
a = stepped is True and completed is True and reset is False
self.assertTrue(__lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
a = DisjunctiveConstraint(__lowerCAmelCase )
a , a , a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
a , a , a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
a , a , a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
a , a , a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
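# Illustrative sketch (added): in generation, a DisjunctiveConstraint is
# passed through `generate(constraints=[...])`, forcing the output to contain
# one of the candidate token-id sequences; the kwarg name follows the upstream
# GenerationMixin API and is an assumption here. Constrained decoding requires
# beam search, hence num_beams > 1.
# constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
# output_ids = model.generate(input_ids, constraints=[constraint], num_beams=4)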
| 32
| 1