import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
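# `get_duration` and `generate_example_dataset` come from a local `utils` module that
# is not part of this file. As an assumption (the real helper may differ), a minimal
# stand-in for `get_duration` is sketched below: a decorator that times the wrapped
# call and returns the elapsed seconds, which is consistent with how its return value
# is stored in the JSON report above.
import functools
import time


def get_duration_sketch(func):  # hypothetical stand-in for utils.get_duration
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed wall-clock seconds

    return wrapper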
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        # Note: the comparison `bool(...) is not None` in the original is always True,
        # which made the filter a no-op; the regex check below applies the intended filter.
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only
        # used at the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id; sentinel tokens count down from the top of the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) back to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
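# The sentinel-token arithmetic above maps `<extra_id_0>` to the highest vocabulary id
# and counts down from there. A self-contained check of that mapping (plain Python, no
# tokenizer needed; the vocab size of 32100 used here is illustrative, matching
# 32000 SentencePiece pieces plus 100 extra ids as in t5-small):
vocab_size = 32100
assert vocab_size - 0 - 1 == 32099  # <extra_id_0> -> last id
assert vocab_size - 99 - 1 == 32000  # <extra_id_99> -> first extra id
assert f"<extra_id_{vocab_size - 1 - 32099}>" == "<extra_id_0>"  # inverse direction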
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
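# The `knapsack` function exercised above is not shown in this file. A minimal sketch
# matching the call signature used by the tests, knapsack(capacity, weights, values,
# counter), via the classic 0/1 recursion (the module under test may be implemented
# differently):
def knapsack_sketch(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # Item does not fit: skip it.
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1] + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )


assert knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the test above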
from operator import delitem, getitem, setitem
import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
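# The differential-testing pattern above generalizes to any mapping type: run the same
# (function, *args) operation against the implementation under test and against a plain
# dict, then compare results (and, ideally, raised exception types). A minimal
# self-contained illustration of the idea:
from operator import getitem as _getitem, setitem as _setitem


def _run(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, type(e)  # compare exception types, not instances


reference, candidate = {}, {}
for op in [(_setitem, "k", 1), (_getitem, "k"), (_getitem, "missing")]:
    assert _run(reference, *op) == _run(candidate, *op)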
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h_n = n * (2n - 1), starting at n = 0."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
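# Sanity check of the closed form h_n = n * (2 * n - 1): hexagonal_numbers(5)
# evaluates to [0, 1, 6, 15, 28], since the sequence here starts at n = 0.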
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
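# Note on the assertions in the fast test above: `image[0, -3:, -3:, -1]` inspects the
# 3x3 bottom-right patch of the last channel of the generated 64x64x3 image, so
# `expected_slice` pins down nine reference pixel values rather than the whole output.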
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb ``number_of_steps`` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
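# Worked example: climb_stairs(3) == 3, counting 1+1+1, 1+2 and 2+1. The loop is the
# Fibonacci recurrence f(n) = f(n - 1) + f(n - 2) with f(1) = f(2) = 1, shifted so
# that climb_stairs(n) equals the (n + 1)-th Fibonacci number.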
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]

        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]

        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
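# Note on the token-order assertions above: PLBart follows the MBart convention of
# appending suffix tokens [eos, src_lang_code] to the encoder input (hence
# input_ids[1][-2:] == [2, PYTHON_CODE]), while the decoder input produced by
# shift_tokens_right starts with the target language code (here EN_CODE).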
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # the dtype here is an assumption; the obfuscated source only shows torch.int*
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
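# A typical invocation of this pipeline via the standard transformers `pipeline`
# factory (the checkpoint name is illustrative; any zero-shot object detection model
# such as OwlViT should work):
#
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("cat.png", candidate_labels=["cat", "remote control"])
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]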
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
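# This is the probability density of the normal distribution,
# f(x) = 1 / sqrt(2 * pi * sigma^2) * exp(-(x - mu)^2 / (2 * sigma^2));
# e.g. gaussian(0) with the defaults mu=0.0, sigma=1.0 evaluates to
# 1 / sqrt(2 * pi) ≈ 0.3989.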
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        """Find print statements in a dataset script."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
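# A few illustrative cases for the two checks above (not part of the test module):
# _no_encoding_on_file_open flags `open(path)` or `open(path, "r")` but not
# `open(path, encoding="utf-8")` or binary modes such as `open(path, "rb")`;
# _no_print_statements flags a bare `print(...)` while ignoring occurrences inside
# comments, string literals, and triple-quoted docstrings via the non-capturing
# alternatives that precede the captured `(print\()` group.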
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
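# Typical usage of the classes above via the standard transformers `pipeline` factory
# (the checkpoint name is illustrative):
#
# from transformers import pipeline, Conversation
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("Going to the movies tonight - any suggestions?")
# conversation = chatbot(conversation)
# conversation.generated_responses[-1]  # the bot's reply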
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''pixel_values''']
def __init__( self : List[Any] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = size if size is not None else {"shortest_edge": 384}
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
_UpperCamelCase = do_resize
_UpperCamelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCamelCase = crop_pct if crop_pct is not None else 224 / 256
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : float , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}')
_UpperCamelCase = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_UpperCamelCase = int(shortest_edge / crop_pct)
_UpperCamelCase = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_)
_UpperCamelCase = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_)
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ) -> Dict:
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Dict , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : str , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
_UpperCamelCase = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowercase_) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
_UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
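# Illustrative sketch (not part of this class) of the crop_pct rule inside
# `resize` above: for a requested short side below 384 the image is first
# resized so its short side equals int(shortest_edge / crop_pct) and is then
# center-cropped back down to `shortest_edge`.
_shortest_edge = 224
_crop_pct = 224 / 256  # the backwards-compatibility default from __init__
_resize_target = int(_shortest_edge / _crop_pct)
assert _resize_target == 256  # resize the short side to 256, then crop to 224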
| 82 | def lowerCAmelCase__ ( a__ = 50 ) ->int:
'''simple docstring'''
_UpperCamelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any=0) -> Any:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
_UpperCamelCase = torch.manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
_UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def __UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_UpperCamelCase = init_image.resize((128, 128))
# using the PNDM scheduler by default
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A fantasy landscape, trending on artstation"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_UpperCamelCase = init_image.resize((128, 128))
_UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler")
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A fantasy landscape, trending on artstation"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 1.5
_UpperCamelCase = int(factor * num_class_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCamelCase = client.query(text=a__ )
if len(a__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
_UpperCamelCase = int(factor * num_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
f'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
_UpperCamelCase = class_images[count]
count += 1
try:
_UpperCamelCase = requests.get(images["url"] )
if img.status_code == 200:
_UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowerCamelCase__ = (720, 1280) # Height, Width
lowerCamelCase__ = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowerCamelCase__ = 1 / 100
lowerCamelCase__ = ''''''
lowerCamelCase__ = ''''''
lowerCamelCase__ = ''''''
lowerCamelCase__ = 250
def lowerCAmelCase__ ( ) ->None:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = get_dataset(a__ , a__ )
for index in range(a__ ):
_UpperCamelCase = random.sample(range(len(a__ ) ) , 4 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = update_image_and_anno(
a__ , a__ , a__ , a__ , a__ , filter_scale=a__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase = random_chars(32 )
_UpperCamelCase = path.split(os.sep )[-1].rsplit("." , 1 )[0]
_UpperCamelCase = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , a__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
_UpperCamelCase = []
for anno in new_annos:
_UpperCamelCase = anno[3] - anno[1]
_UpperCamelCase = anno[4] - anno[2]
_UpperCamelCase = anno[1] + width / 2
_UpperCamelCase = anno[2] + height / 2
_UpperCamelCase = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(a__ )
with open(f'{file_root}.txt' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowerCAmelCase__ ( a__ , a__ ) ->tuple[list, list]:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = []
for label_file in glob.glob(os.path.join(a__ , "*.txt" ) ):
_UpperCamelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(a__ ) as in_file:
_UpperCamelCase = in_file.readlines()
_UpperCamelCase = os.path.join(a__ , f'{label_name}.jpg' )
_UpperCamelCase = []
for obj_list in obj_lists:
_UpperCamelCase = obj_list.rstrip("\n" ).split(" " )
_UpperCamelCase = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(a__ )
labels.append(a__ )
return img_paths, labels
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ = 0.0 , ) ->tuple[list, list, str]:
'''simple docstring'''
_UpperCamelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase = int(scale_x * output_size[1] )
_UpperCamelCase = int(scale_y * output_size[0] )
_UpperCamelCase = []
_UpperCamelCase = []
for i, index in enumerate(a__ ):
_UpperCamelCase = all_img_list[index]
path_list.append(a__ )
_UpperCamelCase = all_annos[index]
_UpperCamelCase = cva.imread(a__ )
if i == 0: # top-left
_UpperCamelCase = cva.resize(a__ , (divid_point_x, divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = bbox[1] * scale_x
_UpperCamelCase = bbox[2] * scale_y
_UpperCamelCase = bbox[3] * scale_x
_UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase = cva.resize(a__ , (output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase = bbox[2] * scale_y
_UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase = cva.resize(a__ , (divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = bbox[1] * scale_x
_UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase = bbox[3] * scale_x
_UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase = cva.resize(
a__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase = ascii_lowercase + digits
return "".join(random.choice(a__ ) for _ in range(a__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
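# Quick self-contained check (illustrative, with made-up coordinates) of the
# corner -> centre box conversion performed when main() writes the
# YOLO-style label lines above.
_xmin, _ymin, _xmax, _ymax = 0.25, 0.25, 0.75, 0.5
_w, _h = _xmax - _xmin, _ymax - _ymin
_xc, _yc = _xmin + _w / 2, _ymin + _h / 2
assert (_xc, _yc, _w, _h) == (0.5, 0.375, 0.5, 0.25)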
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 0.0
for i, j in zip(lowercase_ , lowercase_):
n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_) else 0.0
_UpperCamelCase = n_correct / len(lowercase_)
return {
"accuracy": accuracy,
}
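# Minimal sketch (illustrative) of the reduction in _compute above, with
# plain string equality standing in for math_equivalence.is_equiv:
_preds, _refs = ["\\frac{1}{2}", "3"], ["\\frac{1}{2}", "4"]
_n_correct = sum(1.0 if p == r else 0.0 for p, r in zip(_preds, _refs))
assert _n_correct / len(_preds) == 0.5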
| 82 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCamelCase__ = get_logger(__name__)
lowerCamelCase__ = R'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class _UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(lowercase_)
def __call__( self : List[Any] , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class _UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(lowercase_)
def __call__( self : Tuple , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
@add_start_docstrings(lowercase_)
def __call__( self : Optional[int] , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int , **lowercase_ : Dict) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
_UpperCamelCase = inspect.signature(processor.__call__).parameters
if len(lowercase_) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
f'Make sure that all the required parameters: {list(function_args.keys())} for '
f'{processor.__class__} are passed to the logits processor.')
_UpperCamelCase = processor(lowercase_ , lowercase_ , lowercase_ , **lowercase_)
else:
_UpperCamelCase = processor(lowercase_ , lowercase_ , lowercase_)
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : float) -> Tuple:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_) or not (temperature > 0):
raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')
_UpperCamelCase = temperature
def __call__( self : Any , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = scores / self.temperature
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : float , lowercase_ : float = -float("Inf") , lowercase_ : int = 1) -> Optional[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
if not isinstance(lowercase_ , lowercase_) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')
_UpperCamelCase = top_p
_UpperCamelCase = filter_value
_UpperCamelCase = min_tokens_to_keep
def __call__( self : Any , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = lax.top_k(lowercase_ , scores.shape[-1])
_UpperCamelCase = jnp.full_like(lowercase_ , self.filter_value)
_UpperCamelCase = jax.nn.softmax(lowercase_ , axis=-1).cumsum(axis=-1)
_UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_UpperCamelCase = jnp.roll(lowercase_ , 1)
score_mask |= score_mask.at[:, 0].set(lowercase_)
# min tokens to keep
_UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(lowercase_)
_UpperCamelCase = jnp.where(lowercase_ , lowercase_ , lowercase_)
_UpperCamelCase = jax.lax.sort_key_val(lowercase_ , lowercase_)[-1]
return next_scores
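# Illustrative numpy restatement (an addition, not this module's API) of the
# top-p mask built above: keep the smallest descending-probability prefix
# whose cumulative mass reaches top_p, always keeping the boundary token.
import numpy as np

def _top_p_keep_mask(logits, top_p):
    order = np.argsort(logits)[::-1]        # token ids, best first
    probs = np.exp(logits[order] - logits[order].max())
    probs /= probs.sum()
    keep_sorted = np.cumsum(probs) < top_p
    keep_sorted = np.roll(keep_sorted, 1)   # shift right so the boundary token stays
    keep_sorted[0] = True                   # always keep the top token
    keep = np.zeros_like(keep_sorted)
    keep[order] = keep_sorted               # scatter back to vocabulary order
    return keep

assert _top_p_keep_mask(np.array([2.0, 1.0, 0.5, -1.0]), 0.9).sum() >= 1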
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : int , lowercase_ : float = -float("Inf") , lowercase_ : int = 1) -> Any:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')
_UpperCamelCase = max(lowercase_ , lowercase_)
_UpperCamelCase = filter_value
def __call__( self : List[Any] , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = scores.shape
_UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value)
_UpperCamelCase = min(self.top_k , scores.shape[-1]) # Safety check
_UpperCamelCase , _UpperCamelCase = lax.top_k(lowercase_ , lowercase_)
_UpperCamelCase = jnp.broadcast_to((jnp.arange(lowercase_) * vocab_size)[:, None] , (batch_size, topk)).flatten()
_UpperCamelCase = topk_scores.flatten()
_UpperCamelCase = topk_indices.flatten() + shift
_UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(lowercase_)
_UpperCamelCase = next_scores_flat.reshape(lowercase_ , lowercase_)
return next_scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase_ : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = bos_token_id
def __call__( self : Union[str, Any] , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = jnp.full(scores.shape , -float("inf"))
_UpperCamelCase = 1 - jnp.bool_(cur_len - 1)
_UpperCamelCase = jnp.where(lowercase_ , new_scores.at[:, self.bos_token_id].set(0) , lowercase_)
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = max_length
_UpperCamelCase = eos_token_id
def __call__( self : str , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = jnp.full(scores.shape , -float("inf"))
_UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1)
_UpperCamelCase = jnp.where(lowercase_ , new_scores.at[:, self.eos_token_id].set(0) , lowercase_)
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase_ : int , lowercase_ : int) -> Dict:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
if not isinstance(lowercase_ , lowercase_) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')
_UpperCamelCase = min_length
_UpperCamelCase = eos_token_id
def __call__( self : Tuple , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1)
_UpperCamelCase = jnp.where(lowercase_ , scores.at[:, self.eos_token_id].set(-float("inf")) , lowercase_)
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = list(lowercase_)
_UpperCamelCase = begin_index
def __call__( self : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index)
_UpperCamelCase = jnp.where(lowercase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf")) , lowercase_)
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : list) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = list(lowercase_)
def __call__( self : Union[str, Any] , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float("inf"))
return scores
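# Standalone jax sketch (illustrative, not part of this class) of the
# functional index update used above: `.at[...].set(...)` returns a new
# array and never mutates the input in place.
import jax.numpy as jnp
_scores = jnp.zeros((2, 5))
_banned = jnp.array([1, 3])
_new = _scores.at[..., _banned].set(-float("inf"))
assert bool(jnp.isinf(_new[0, 1])) and bool(_scores[0, 1] == 0.0)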
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = dict(lowercase_)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_UpperCamelCase = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1
for index, token in force_token_map.items():
if token is not None:
_UpperCamelCase = force_token_array.at[index].set(lowercase_)
_UpperCamelCase = jnp.intaa(lowercase_)
def __call__( self : Tuple , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : int) -> jnp.ndarray:
"""simple docstring"""
def _force_token(lowercase_ : Optional[Any]):
_UpperCamelCase = scores.shape[0]
_UpperCamelCase = self.force_token_array[generation_idx]
_UpperCamelCase = jnp.ones_like(lowercase_ , dtype=scores.dtype) * -float("inf")
_UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype)
_UpperCamelCase = lax.dynamic_update_slice(lowercase_ , lowercase_ , (0, current_token))
return new_scores
_UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowercase_) , lambda: scores , ) , )
return scores
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = generate_config.eos_token_id
_UpperCamelCase = generate_config.no_timestamps_token_id
_UpperCamelCase = generate_config.no_timestamps_token_id + 1
_UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowercase_ , "max_initial_timestamp_index"):
_UpperCamelCase = generate_config.max_initial_timestamp_index
else:
_UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_UpperCamelCase = model_config.vocab_size
def __call__( self : int , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
def handle_pairs(lowercase_ : Optional[Any] , lowercase_ : int):
_UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , lowercase_ , lowercase_)
_UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , last_was_timestamp , lowercase_ , )
_UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , lowercase_ , lowercase_)
_UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowercase_ , lowercase_ , )
return jnp.where(
lowercase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf")) , scores_k.at[: self.eos_token_id].set(-float("inf")) , ) , lowercase_ , )
_UpperCamelCase = jax.vmap(lowercase_)(lowercase_ , lowercase_)
_UpperCamelCase = jnp.where(cur_len == self.begin_index , lowercase_ , lowercase_)
_UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , apply_max_initial_timestamp , lowercase_ , )
_UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
_UpperCamelCase = jnp.where(
lowercase_ , scores.at[:, last_allowed + 1 :].set(-float("inf")) , lowercase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
_UpperCamelCase = jax.nn.log_softmax(lowercase_ , axis=-1)
def handle_cumulative_probs(lowercase_ : int , lowercase_ : List[str]):
_UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1)
_UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin])
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf")) , lowercase_ , )
_UpperCamelCase = jax.vmap(lowercase_)(lowercase_ , lowercase_)
return scores
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
for i in range(0 , len(a__ ) , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(0 , a__ , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_UpperCamelCase = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_UpperCamelCase = generate_example_dataset(
os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(a__ ) )
_UpperCamelCase = func(a__ , **a__ )
print("shuffling dataset" )
_UpperCamelCase = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(a__ ) )
_UpperCamelCase = func(
a__ , **a__ )
with open(a__ , "wb" ) as f:
f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
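# Hypothetical sketch of the `get_duration` decorator imported from `utils`
# above; the real helper is not shown here, so this is an assumption: it is
# expected to return the wrapped call's elapsed wall-clock time in seconds.
import time
from functools import wraps

def _get_duration_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper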
| 82 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''pixel_values''']
def __init__( self : Tuple , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = size if size is not None else {"shortest_edge": 224}
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
_UpperCamelCase = crop_size if crop_size is not None else {"height": 256, "width": 256}
_UpperCamelCase = get_size_dict(lowercase_ , param_name="crop_size")
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_flip_channel_order
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PIL.Image.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
_UpperCamelCase = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Dict , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ) -> int:
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
"""simple docstring"""
return flip_channel_order(lowercase_ , data_format=lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowercase_ , param_name="crop_size")
_UpperCamelCase = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowercase_) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_UpperCamelCase = [self.flip_channel_order(image=lowercase_) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
_UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCAmelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : List[Tuple] = None) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(lowercase_):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowercase_)):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_)
_UpperCamelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
_UpperCamelCase = logits.argmax(dim=1)
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
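# Self-contained sketch (illustrative) of the argmax reduction above: logits
# of shape (batch, num_labels, height, width) collapse to one (height, width)
# label map per image.
import torch
_logits = torch.randn(2, 3, 4, 4)  # batch of 2, 3 classes, 4x4 maps
_maps = _logits.argmax(dim=1)
assert _maps.shape == (2, 4, 4)
assert int(_maps.max()) <= 2  # class indices are 0, 1 or 2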
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-celebahq-256"
_UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
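# Self-contained sketch (illustrative) of the corner-slice comparison used in
# the tests above: only the trailing 3x3 patch of the last channel is checked
# against the expected values.
import numpy as np
_image = np.zeros((1, 32, 32, 3))
_slice = _image[0, -3:, -3:, -1]
assert _slice.shape == (3, 3)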
| 82 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
# Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# An input with no mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda x: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't return more
# results than there are unique targets
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 1 |
import baseaa
def baseaa_encode( a__ ) ->bytes:
'''simple docstring'''
return baseaa.baaencode(a__.encode("utf-8" ) )
def baseaa_decode( a__ ) ->str:
'''simple docstring'''
return baseaa.baadecode(a__ ).decode("utf-8" )
if __name__ == "__main__":
lowerCamelCase__ = '''Hello World!'''
lowerCamelCase__ = baseaa_encode(test)
print(encoded)
lowerCamelCase__ = baseaa_decode(encoded)
print(decoded)
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
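# The RULE comments in the function below follow Dijkstra's classic two-stack scheme:
# RULE 1: push operands onto the operand stack.
# RULE 2: push operators onto the operator stack.
# RULE 3: ignore left parentheses (hence no explicit "(" branch below).
# RULE 4: on a right parenthesis, pop one operator and two operands, apply the
#         operator, and push the result back onto the operand stack.
# RULE 5: once the input is exhausted, the top of the operand stack is the answer.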
def dijkstras_two_stack_algorithm( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''bertabs'''
def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = max_pos
_UpperCamelCase = enc_layers
_UpperCamelCase = enc_hidden_size
_UpperCamelCase = enc_heads
_UpperCamelCase = enc_ff_size
_UpperCamelCase = enc_dropout
_UpperCamelCase = dec_layers
_UpperCamelCase = dec_hidden_size
_UpperCamelCase = dec_heads
_UpperCamelCase = dec_ff_size
_UpperCamelCase = dec_dropout
| 82 | 1 |
lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
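# The RULE comments in the function below follow Dijkstra's classic two-stack scheme:
# RULE 1: push operands onto the operand stack.
# RULE 2: push operators onto the operator stack.
# RULE 3: ignore left parentheses (hence no explicit "(" branch below).
# RULE 4: on a right parenthesis, pop one operator and two operands, apply the
#         operator, and push the result back onto the operand stack.
# RULE 5: once the input is exhausted, the top of the operand stack is the answer.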
def dijkstras_two_stack_algorithm( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''image_processor''', '''tokenizer''']
__A = '''CLIPImageProcessor'''
__A = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , **lowercase_ : int) -> str:
"""simple docstring"""
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_UpperCamelCase = kwargs.pop("feature_extractor")
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(lowercase_ , lowercase_)
def __call__( self : Tuple , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : Any=None , **lowercase_ : str) -> int:
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
_UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_)
if images is not None:
_UpperCamelCase = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_)
if text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_) , tensor_type=lowercase_)
def __UpperCAmelCase ( self : int , *lowercase_ : Any , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def __UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.model_input_names
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
return self.image_processor
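# A minimal usage sketch (assuming this cell corresponds to the upstream
# CLIPProcessor; the model id below is illustrative, not taken from this file):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> a BatchEncoding with input_ids, attention_mask and pixel_values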
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 82 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''esm'''
def __init__( self : Tuple , lowercase_ : str=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=None , lowercase_ : int=768 , lowercase_ : int=12 , lowercase_ : Any=12 , lowercase_ : Optional[Any]=3072 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Any=1026 , lowercase_ : int=0.02 , lowercase_ : Any=1e-1_2 , lowercase_ : Tuple="absolute" , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=None , **lowercase_ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = emb_layer_norm_before
_UpperCamelCase = token_dropout
_UpperCamelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values.")
_UpperCamelCase = EsmFoldConfig()
elif isinstance(lowercase_ , lowercase_):
_UpperCamelCase = EsmFoldConfig(**lowercase_)
_UpperCamelCase = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
_UpperCamelCase = get_default_vocab_list()
else:
_UpperCamelCase = vocab_list
else:
_UpperCamelCase = None
_UpperCamelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowercase_):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().to_dict()
if isinstance(self.esmfold_config , lowercase_):
_UpperCamelCase = self.esmfold_config.to_dict()
return output
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = None
__A = True
__A = False
__A = False
__A = False
__A = 0
__A = True
__A = False
__A = 128
__A = None
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
if self.trunk is None:
_UpperCamelCase = TrunkConfig()
elif isinstance(self.trunk , lowercase_):
_UpperCamelCase = TrunkConfig(**self.trunk)
def __UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
_UpperCamelCase = asdict(self)
_UpperCamelCase = self.trunk.to_dict()
return output
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = 48
__A = 1_024
__A = 128
__A = 32
__A = 32
__A = 32
__A = 0
__A = 0
__A = False
__A = 4
__A = 128
__A = None
def __UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
if self.structure_module is None:
_UpperCamelCase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase_):
_UpperCamelCase = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f' {self.sequence_state_dim} and {self.sequence_head_width}.')
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')
_UpperCamelCase = self.sequence_state_dim // self.sequence_head_width
_UpperCamelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')
def __UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = asdict(self)
_UpperCamelCase = self.structure_module.to_dict()
return output
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = 384
__A = 128
__A = 16
__A = 128
__A = 12
__A = 4
__A = 8
__A = 0.1
__A = 8
__A = 1
__A = 2
__A = 7
__A = 10
__A = 1e-8
__A = 1e5
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return asdict(self)
def get_default_vocab_list( ) ->Dict:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
from manim import *
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = Rectangle(height=0.5 , width=0.5)
_UpperCamelCase = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
_UpperCamelCase = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
_UpperCamelCase = VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
_UpperCamelCase = Text("CPU" , font_size=24)
_UpperCamelCase = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
_UpperCamelCase = [mem.copy() for i in range(1)]
_UpperCamelCase = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
_UpperCamelCase = Text("GPU" , font_size=24)
_UpperCamelCase = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.align_to(lowercase_ , lowercase_)
gpu.set_x(gpu.get_x() - 1)
self.add(lowercase_)
_UpperCamelCase = [mem.copy() for i in range(6)]
_UpperCamelCase = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
_UpperCamelCase = Text("Model" , font_size=24)
_UpperCamelCase = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.play(
Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , )
_UpperCamelCase = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_UpperCamelCase = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_UpperCamelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_ , run_time=2.5) , Write(lowercase_) , Write(lowercase_))
self.add(lowercase_)
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for i, rect in enumerate(lowercase_):
_UpperCamelCase = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
cpu_target.move_to(lowercase_)
cpu_target.generate_target()
_UpperCamelCase = 0.46 / 4
_UpperCamelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase_ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase_ , buff=0.0)
cpu_targs.append(lowercase_)
first_animations.append(rect.animate(run_time=0.5).set_stroke(lowercase_))
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( a__ , a__ , a__ , a__=None ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = XLNetConfig.from_json_file(a__ )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(a__ )
else:
_UpperCamelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCamelCase__ = {
'''camembert-base''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowercase_ : Dict , lowercase_ : Dict="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Union[str, Any]="</s>" , lowercase_ : Optional[int]="<s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Optional[Any]="<pad>" , lowercase_ : Optional[int]="<mask>" , lowercase_ : List[Any]=["<s>NOTUSED", "</s>NOTUSED"] , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Any , ) -> None:
"""simple docstring"""
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowercase_))
_UpperCamelCase = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
_UpperCamelCase = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_UpperCamelCase = len(self.fairseq_tokens_to_ids)
_UpperCamelCase = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
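# Offset arithmetic example: with the 4 fairseq specials above, fairseq_offset
# is 4, so sentencepiece piece id p maps to overall vocab id p + 4, while ids
# 0-3 resolve through fairseq_tokens_to_ids / fairseq_ids_to_tokens directly.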
def __UpperCAmelCase ( self : Tuple , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is None:
return [1] + ([0] * len(lowercase_)) + [1]
return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase_ , out_type=lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
if lowercase_ in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[lowercase_]
elif self.sp_model.PieceToId(lowercase_) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
if lowercase_ in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[lowercase_]
return self.sp_model.IdToPiece(lowercase_ - self.fairseq_offset)
def __UpperCAmelCase ( self : Any , lowercase_ : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __getstate__( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple) -> int:
"""simple docstring"""
super().__init__()
_UpperCamelCase = model
_UpperCamelCase = 2
_UpperCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
pass
def convert_longformer_qa_checkpoint_to_pytorch( a__ , a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = LongformerModel.from_pretrained(a__ )
_UpperCamelCase = LightningModel(a__ )
_UpperCamelCase = torch.load(a__ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_UpperCamelCase = LongformerForQuestionAnswering.from_pretrained(a__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__ )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 82 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''detr'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Any , lowercase_ : List[Any]=True , lowercase_ : Optional[Any]=None , lowercase_ : Any=3 , lowercase_ : List[Any]=100 , lowercase_ : List[Any]=6 , lowercase_ : int=2048 , lowercase_ : Tuple=8 , lowercase_ : List[Any]=6 , lowercase_ : List[Any]=2048 , lowercase_ : List[Any]=8 , lowercase_ : Dict=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]="relu" , lowercase_ : Union[str, Any]=256 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : List[str]=0.02 , lowercase_ : Any=1.0 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]="sine" , lowercase_ : Optional[int]="resnet50" , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=5 , lowercase_ : int=2 , lowercase_ : List[Any]=1 , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=5 , lowercase_ : Optional[Any]=2 , lowercase_ : Union[str, Any]=0.1 , **lowercase_ : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(lowercase_ , lowercase_):
_UpperCamelCase = backbone_config.get("model_type")
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(lowercase_)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return self.d_model
@classmethod
def __UpperCAmelCase ( cls : List[Any] , lowercase_ : PretrainedConfig , **lowercase_ : int) -> List[str]:
"""simple docstring"""
return cls(backbone_config=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str]) -> Dict[str, any]:
"""simple docstring"""
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : int) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
])
@property
def __UpperCAmelCase ( self : List[str]) -> float:
"""simple docstring"""
return 1e-5
@property
def __UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return 12
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 82 | 1 |
from __future__ import annotations
def kmp( a__ , a__ ) ->bool:
'''simple docstring'''
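# 1) Construct the failure array for the pattern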
_UpperCamelCase = get_failure_array(a__ )
# 2) Step through text searching for pattern
_UpperCamelCase , _UpperCamelCase = 0, 0 # index into text, pattern
while i < len(a__ ):
if pattern[j] == text[i]:
if j == (len(a__ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_UpperCamelCase = failure[j - 1]
continue
i += 1
return False
def get_failure_array( a__ ) ->list[int]:
'''simple docstring'''
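# failure[i] stores the length of the longest proper prefix of pattern[: i + 1]
# that is also a suffix of it (the classic KMP prefix function).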
_UpperCamelCase = [0]
_UpperCamelCase = 0
_UpperCamelCase = 1
while j < len(a__ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_UpperCamelCase = failure[i - 1]
continue
j += 1
failure.append(a__ )
return failure
if __name__ == "__main__":
# Test 1)
lowerCamelCase__ = '''abc1abc12'''
lowerCamelCase__ = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCamelCase__ = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, textb)
# Test 2)
lowerCamelCase__ = '''ABABX'''
lowerCamelCase__ = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCamelCase__ = '''AAAB'''
lowerCamelCase__ = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCamelCase__ = '''abcdabcy'''
lowerCamelCase__ = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCamelCase__ = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
raise ValueError(
f' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
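        # regroup the flat generation output so each input row holds its num_return_sequences candidates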
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.')
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''translation'''
def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"increasing your max_length manually, e.g. translator('...', max_length=400)")
return True
def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
if src_lang is not None:
_UpperCamelCase = src_lang
if tgt_lang is not None:
_UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; passing src_lang/tgt_lang directly is preferred.
_UpperCamelCase = kwargs.get("task" , self.task)
_UpperCamelCase = task.split("_")
if task and len(lowercase_) == 4:
                # task has the form "translation_XX_to_YY"
_UpperCamelCase = items[1]
_UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
| 82 | 1 |
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''bertabs'''
def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = max_pos
_UpperCamelCase = enc_layers
_UpperCamelCase = enc_hidden_size
_UpperCamelCase = enc_heads
_UpperCamelCase = enc_ff_size
_UpperCamelCase = enc_dropout
_UpperCamelCase = dec_layers
_UpperCamelCase = dec_hidden_size
_UpperCamelCase = dec_heads
_UpperCamelCase = dec_ff_size
_UpperCamelCase = dec_dropout
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
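            # build the default <extra_id_N> sentinel tokens used for T5's span-corruption objective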
_UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend that you'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_UpperCamelCase = legacy
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = extra_ids
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
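        # an eos token is appended to each segment, so mark the trailing position(s) as special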
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = text.startswith(lowercase_)
if is_first:
_UpperCamelCase = text[1:]
_UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
_UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if token.startswith("<extra_id_"):
_UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
_UpperCamelCase = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
else:
_UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 82 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCAmelCase__ ( a__ = 2_000_000 ) ->int:
'''simple docstring'''
_UpperCamelCase = [0]
_UpperCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_UpperCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
_UpperCamelCase = 0
# an estimate of b, using the quadratic formula
_UpperCamelCase = 42
# the largest integer less than b_estimate
_UpperCamelCase = 42
    # the smallest integer greater than b_estimate
_UpperCamelCase = 42
# the triangle number corresponding to b_floor
_UpperCamelCase = 42
# the triangle number corresponding to b_ceil
_UpperCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_UpperCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_UpperCamelCase = floor(a__ )
_UpperCamelCase = ceil(a__ )
_UpperCamelCase = triangle_numbers[b_floor]
_UpperCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_UpperCamelCase = triangle_b_first_guess * triangle_a
_UpperCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_UpperCamelCase = triangle_b_second_guess * triangle_a
_UpperCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
def is_public(a__ ) -> bool:
return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
| 82 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = 42
__A = jnp.floataa
__A = True
def __UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
super().setup()
_UpperCamelCase = nn.Dense(5 , dtype=self.dtype)
def __call__( self : Dict , *lowercase_ : Optional[Any] , **lowercase_ : Dict) -> str:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
_UpperCamelCase = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = FlaxBigBirdForNaturalQuestionsModule
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ ) ->List[Any]:
'''simple docstring'''
def cross_entropy(a__ , a__ , a__=None ):
_UpperCamelCase = logits.shape[-1]
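        # one-hot encode the integer labels, then take the negative log-likelihood under the model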
_UpperCamelCase = (labels[..., None] == jnp.arange(a__ )[None]).astype("f4" )
_UpperCamelCase = jax.nn.log_softmax(a__ , axis=-1 )
_UpperCamelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_UpperCamelCase = reduction(a__ )
return loss
_UpperCamelCase = partial(a__ , reduction=jnp.mean )
_UpperCamelCase = cross_entropy(a__ , a__ )
_UpperCamelCase = cross_entropy(a__ , a__ )
_UpperCamelCase = cross_entropy(a__ , a__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = "google/bigbird-roberta-base"
__A = 3_000
__A = 10_500
__A = 128
__A = 3
__A = 1
__A = 5
# tx_args
__A = 3e-5
__A = 0.0
__A = 20_000
__A = 0.0_095
__A = "bigbird-roberta-natural-questions"
__A = "training-expt"
__A = "data/nq-training.jsonl"
__A = "data/nq-validation.jsonl"
def __UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=lowercase_)
_UpperCamelCase = os.path.join(self.base_dir , self.save_dir)
_UpperCamelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = 42
__A = 4_096 # no dynamic padding on TPUs
def __call__( self : List[str] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = self.collate_fn(lowercase_)
_UpperCamelCase = jax.tree_util.tree_map(lowercase_ , lowercase_)
return batch
def __UpperCAmelCase ( self : str , lowercase_ : str) -> List[str]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.fetch_inputs(features["input_ids"])
_UpperCamelCase = {
"input_ids": jnp.array(lowercase_ , dtype=jnp.intaa),
"attention_mask": jnp.array(lowercase_ , dtype=jnp.intaa),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa),
}
return batch
def __UpperCAmelCase ( self : List[Any] , lowercase_ : list) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = [self._fetch_inputs(lowercase_) for ids in input_ids]
return zip(*lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : list) -> List[str]:
"""simple docstring"""
_UpperCamelCase = [1 for _ in range(len(lowercase_))]
while len(lowercase_) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def lowerCAmelCase__ ( a__ , a__ , a__=None ) ->Optional[int]:
'''simple docstring'''
if seed is not None:
_UpperCamelCase = dataset.shuffle(seed=a__ )
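    # note that any trailing partial batch is dropped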
for i in range(len(a__ ) // batch_size ):
_UpperCamelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(a__ )
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase__ ( a__ , a__ , **a__ ) ->Any:
'''simple docstring'''
def loss_fn(a__ ):
_UpperCamelCase = model_inputs.pop("start_labels" )
_UpperCamelCase = model_inputs.pop("end_labels" )
_UpperCamelCase = model_inputs.pop("pooled_labels" )
_UpperCamelCase = state.apply_fn(**a__ , params=a__ , dropout_rng=a__ , train=a__ )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = outputs
return state.loss_fn(
a__ , a__ , a__ , a__ , a__ , a__ , )
_UpperCamelCase , _UpperCamelCase = jax.random.split(a__ )
_UpperCamelCase = jax.value_and_grad(a__ )
_UpperCamelCase , _UpperCamelCase = grad_fn(state.params )
_UpperCamelCase = jax.lax.pmean({"loss": loss} , axis_name="batch" )
_UpperCamelCase = jax.lax.pmean(a__ , "batch" )
_UpperCamelCase = state.apply_gradients(grads=a__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase__ ( a__ , **a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = model_inputs.pop("start_labels" )
_UpperCamelCase = model_inputs.pop("end_labels" )
_UpperCamelCase = model_inputs.pop("pooled_labels" )
_UpperCamelCase = state.apply_fn(**a__ , params=state.params , train=a__ )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = outputs
_UpperCamelCase = state.loss_fn(a__ , a__ , a__ , a__ , a__ , a__ )
_UpperCamelCase = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class _UpperCAmelCase ( train_state.TrainState ):
'''simple docstring'''
__A = struct.field(pytree_node=lowerCAmelCase )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
__A = 42
__A = 42
__A = 42
__A = 42
__A = 42
__A = 42
__A = None
def __UpperCAmelCase ( self : Tuple , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Tuple=None) -> Dict:
"""simple docstring"""
_UpperCamelCase = model.params
_UpperCamelCase = TrainState.create(
apply_fn=model.__call__ , params=lowercase_ , tx=lowercase_ , loss_fn=lowercase_ , )
if ckpt_dir is not None:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = restore_checkpoint(lowercase_ , lowercase_)
_UpperCamelCase = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
_UpperCamelCase , _UpperCamelCase = build_tx(**lowercase_)
_UpperCamelCase = train_state.TrainState(
step=lowercase_ , apply_fn=model.__call__ , params=lowercase_ , tx=lowercase_ , opt_state=lowercase_ , )
_UpperCamelCase = args
_UpperCamelCase = data_collator
_UpperCamelCase = lr
_UpperCamelCase = params
_UpperCamelCase = jax_utils.replicate(lowercase_)
return state
def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = self.args
_UpperCamelCase = len(lowercase_) // args.batch_size
_UpperCamelCase = jax.random.PRNGKey(0)
_UpperCamelCase = jax.random.split(lowercase_ , jax.device_count())
for epoch in range(args.max_epochs):
_UpperCamelCase = jnp.array(0 , dtype=jnp.floataa)
_UpperCamelCase = get_batched_dataset(lowercase_ , args.batch_size , seed=lowercase_)
_UpperCamelCase = 0
for batch in tqdm(lowercase_ , total=lowercase_ , desc=f'Running EPOCH-{epoch}'):
_UpperCamelCase = self.data_collator(lowercase_)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.train_step_fn(lowercase_ , lowercase_ , **lowercase_)
running_loss += jax_utils.unreplicate(metrics["loss"])
i += 1
if i % args.logging_steps == 0:
_UpperCamelCase = jax_utils.unreplicate(state.step)
_UpperCamelCase = running_loss.item() / i
_UpperCamelCase = self.scheduler_fn(state_step - 1)
_UpperCamelCase = self.evaluate(lowercase_ , lowercase_)
_UpperCamelCase = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(lowercase_))
self.logger.log(lowercase_ , commit=lowercase_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase_)
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = get_batched_dataset(lowercase_ , self.args.batch_size)
_UpperCamelCase = len(lowercase_) // self.args.batch_size
_UpperCamelCase = jnp.array(0 , dtype=jnp.floataa)
_UpperCamelCase = 0
for batch in tqdm(lowercase_ , total=lowercase_ , desc="Evaluating ... "):
_UpperCamelCase = self.data_collator(lowercase_)
_UpperCamelCase = self.val_step_fn(lowercase_ , **lowercase_)
running_loss += jax_utils.unreplicate(metrics["loss"])
i += 1
return running_loss / i
def __UpperCAmelCase ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : int) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = jax_utils.unreplicate(lowercase_)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=" ... ")
self.model_save_fn(lowercase_ , params=state.params)
with open(os.path.join(lowercase_ , "opt_state.msgpack") , "wb") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(lowercase_ , "args.joblib"))
joblib.dump(self.data_collator , os.path.join(lowercase_ , "data_collator.joblib"))
with open(os.path.join(lowercase_ , "training_state.json") , "w") as f:
json.dump({"step": state.step.item()} , lowercase_)
print("DONE")
def lowerCAmelCase__ ( a__ , a__ ) ->List[str]:
'''simple docstring'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=" ... " )
with open(os.path.join(a__ , "flax_model.msgpack" ) , "rb" ) as f:
_UpperCamelCase = from_bytes(state.params , f.read() )
with open(os.path.join(a__ , "opt_state.msgpack" ) , "rb" ) as f:
_UpperCamelCase = from_bytes(state.opt_state , f.read() )
_UpperCamelCase = joblib.load(os.path.join(a__ , "args.joblib" ) )
_UpperCamelCase = joblib.load(os.path.join(a__ , "data_collator.joblib" ) )
with open(os.path.join(a__ , "training_state.json" ) , "r" ) as f:
_UpperCamelCase = json.load(a__ )
_UpperCamelCase = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = num_train_steps - warmup_steps
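    # linear warmup from init_lr up to lr, then linear decay towards ~0 over the remaining steps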
_UpperCamelCase = optax.linear_schedule(init_value=a__ , end_value=a__ , transition_steps=a__ )
_UpperCamelCase = optax.linear_schedule(init_value=a__ , end_value=1e-7 , transition_steps=a__ )
_UpperCamelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ ) ->Any:
'''simple docstring'''
def weight_decay_mask(a__ ):
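        # apply weight decay to every parameter except biases and LayerNorm scales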
_UpperCamelCase = traverse_util.flatten_dict(a__ )
_UpperCamelCase = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(a__ )
_UpperCamelCase = scheduler_fn(a__ , a__ , a__ , a__ )
_UpperCamelCase = optax.adamw(learning_rate=a__ , weight_decay=a__ , mask=a__ )
return tx, lr
| 82 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
from typing import Any
def lowerCAmelCase__ ( a__ ) ->list[Any]:
'''simple docstring'''
if not input_list:
return []
_UpperCamelCase = [input_list.count(a__ ) for value in input_list]
_UpperCamelCase = max(a__ ) # Gets the maximum count in the input list.
# Gets values of modes
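    # e.g. for input [2, 2, 3] the result is [2]; ties return every modal value, sorted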
return sorted({input_list[i] for i, value in enumerate(a__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and number_of_steps > 0
    ), f'number_of_steps needs to be a positive integer, but your input was {number_of_steps}'
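    # ways(n) follows the Fibonacci recurrence ways(n) = ways(n - 1) + ways(n - 2),
    # e.g. ways(3) == 3: (1+1+1), (1+2), (2+1)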
if number_of_steps == 1:
return 1
_UpperCamelCase , _UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
_UpperCamelCase = DetaConfig(
backbone_config=a__ , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=a__ , with_box_refine=a__ , two_stage=a__ , )
# set labels
_UpperCamelCase = "huggingface/label-files"
if "o365" in model_name:
_UpperCamelCase = 366
_UpperCamelCase = "object365-id2label.json"
else:
_UpperCamelCase = 91
_UpperCamelCase = "coco-detection-id2label.json"
_UpperCamelCase = num_labels
_UpperCamelCase = json.load(open(cached_download(hf_hub_url(a__ , a__ , repo_type="dataset" ) ) , "r" ) )
_UpperCamelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = dct.pop(a__ )
_UpperCamelCase = val
def lowerCAmelCase__ ( a__ , a__ ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCamelCase = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
_UpperCamelCase = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[:dim, :]
_UpperCamelCase = in_proj_bias[: dim]
_UpperCamelCase = in_proj_weight[
dim : dim * 2, :
]
_UpperCamelCase = in_proj_bias[
dim : dim * 2
]
_UpperCamelCase = in_proj_weight[
-dim :, :
]
_UpperCamelCase = in_proj_bias[-dim :]
# fmt: on
def lowerCAmelCase__ ( a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCamelCase = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[:hidden_size, :]
_UpperCamelCase = in_proj_bias[:hidden_size]
_UpperCamelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_UpperCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCamelCase = in_proj_weight[-hidden_size:, :]
_UpperCamelCase = in_proj_bias[-hidden_size:]
def lowerCAmelCase__ ( ) ->str:
'''simple docstring'''
_UpperCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = get_deta_config(a__ )
# load original state dict
if model_name == "deta-swin-large":
_UpperCamelCase = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(f'Model name {model_name} not supported' )
_UpperCamelCase = torch.load(a__ , map_location="cpu" )["model"]
    # log the original state dict entries for inspection
for name, param in state_dict.items():
print(a__ , param.shape )
# rename keys
_UpperCamelCase = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_swin_q_k_v(a__ , config.backbone_config )
read_in_decoder_q_k_v(a__ , a__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_UpperCamelCase = state_dict.pop(a__ )
_UpperCamelCase = val
if "input_proj" in key:
_UpperCamelCase = state_dict.pop(a__ )
_UpperCamelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_UpperCamelCase = state_dict.pop(a__ )
_UpperCamelCase = val
# finally, create HuggingFace model and load state dict
_UpperCamelCase = DetaForObjectDetection(a__ )
model.load_state_dict(a__ )
model.eval()
_UpperCamelCase = "cuda" if torch.cuda.is_available() else "cpu"
model.to(a__ )
# load image processor
_UpperCamelCase = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
_UpperCamelCase = prepare_img()
_UpperCamelCase = processor(images=a__ , return_tensors="pt" )
_UpperCamelCase = encoding["pixel_values"]
_UpperCamelCase = model(pixel_values.to(a__ ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_UpperCamelCase = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_UpperCamelCase = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_UpperCamelCase = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(a__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(a__ ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
processor.save_pretrained(a__ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
super().__init__(**lowercase_)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , "vision")
self.check_model_type(lowercase_)
def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCamelCase = kwargs.pop("text_queries")
if isinstance(lowercase_ , (str, Image.Image)):
_UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
else:
_UpperCamelCase = image
_UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
return results
def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {}
if "threshold" in kwargs:
_UpperCamelCase = kwargs["threshold"]
if "top_k" in kwargs:
_UpperCamelCase = kwargs["top_k"]
return {}, {}, postprocess_params
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
"""simple docstring"""
_UpperCamelCase = load_image(inputs["image"])
_UpperCamelCase = inputs["candidate_labels"]
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = candidate_labels.split(",")
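        # keep the original (height, width) so predicted boxes can be rescaled in postprocessing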
_UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(lowercase_):
_UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
_UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
yield {
"is_last": i == len(lowercase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = model_inputs.pop("target_size")
_UpperCamelCase = model_inputs.pop("candidate_label")
_UpperCamelCase = model_inputs.pop("is_last")
_UpperCamelCase = self.model(**lowercase_)
_UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
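# post-process each per-label model output, then merge all detections and rank them by score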
for model_output in model_outputs:
_UpperCamelCase = model_output["candidate_label"]
_UpperCamelCase = BaseModelOutput(lowercase_)
_UpperCamelCase = self.image_processor.post_process_object_detection(
outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
_UpperCamelCase = outputs["scores"][index].item()
_UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
_UpperCamelCase = {"score": score, "label": label, "box": box}
results.append(lowercase_)
_UpperCamelCase = sorted(results , key=lambda x: x["score"] , reverse=True)
if top_k:
_UpperCamelCase = results[:top_k]
return results
def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
_UpperCamelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 82 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Tuple , lowercase_ : str=13 , lowercase_ : Optional[int]=64 , lowercase_ : Tuple=2 , lowercase_ : int=3 , lowercase_ : str=True , lowercase_ : int=True , lowercase_ : Any=32 , lowercase_ : int=5 , lowercase_ : int=4 , lowercase_ : Optional[int]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : Optional[Any]=[1, 16, 4, 4] , lowercase_ : Tuple=None , ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase_ , )
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = ViTHybridModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : int) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear))
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(lowercase_)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=lowercase_)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->str:
'''simple docstring'''
_UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
lowercase_)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt").to(lowercase_)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**lowercase_)
# verify the logits
_UpperCamelCase = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
_UpperCamelCase = torch.tensor([-1.90_90, -0.49_93, -0.23_89]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@slow
@require_accelerate
def __UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto")
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowercase_ , return_tensors="pt")
_UpperCamelCase = model(**lowercase_)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat")
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
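# flag `open(...)` calls that specify neither an explicit encoding nor a binary mode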
_UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
_UpperCamelCase = input_file.read()
_UpperCamelCase = regexp.search(lowercase_)
return match
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
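# match bare `print(` calls, ignoring ones that appear inside comments or string literals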
_UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
_UpperCamelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase = regexp.finditer(lowercase_)
_UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowercase_)):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(lowercase_)):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]=13 , lowercase_ : Dict=7 , lowercase_ : str=True , lowercase_ : str=True , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : Dict=99 , lowercase_ : str=[1, 1, 2] , lowercase_ : List[Any]=1 , lowercase_ : List[Any]=32 , lowercase_ : Optional[int]=4 , lowercase_ : str=8 , lowercase_ : Any=37 , lowercase_ : Union[str, Any]="gelu_new" , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Dict=0.0 , lowercase_ : List[Any]=512 , lowercase_ : Optional[int]=3 , lowercase_ : Dict=0.02 , lowercase_ : List[str]=3 , lowercase_ : List[str]=4 , lowercase_ : Any=None , lowercase_ : Optional[int]=False , ) -> Any:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = block_sizes
_UpperCamelCase = num_decoder_layers
_UpperCamelCase = d_model
_UpperCamelCase = n_head
_UpperCamelCase = d_head
_UpperCamelCase = d_inner
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = 2
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = initializer_std
# Used in the tests to check the size of the first attention layer
_UpperCamelCase = n_head
# Used in the tests to check the size of the first hidden state
_UpperCamelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_UpperCamelCase = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_UpperCamelCase = self.num_hidden_layers + 2
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : str , lowercase_ : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = TFFunnelModel(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
_UpperCamelCase = False
_UpperCamelCase = TFFunnelModel(config=lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
_UpperCamelCase = False
_UpperCamelCase = TFFunnelModel(config=lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def __UpperCAmelCase ( self : str , lowercase_ : Any , lowercase_ : Dict , lowercase_ : int , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = TFFunnelBaseModel(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
_UpperCamelCase = False
_UpperCamelCase = TFFunnelBaseModel(config=lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
_UpperCamelCase = False
_UpperCamelCase = TFFunnelBaseModel(config=lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def __UpperCAmelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Optional[Any] , ) -> Any:
"""simple docstring"""
_UpperCamelCase = TFFunnelForPreTraining(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , ) -> int:
"""simple docstring"""
_UpperCamelCase = TFFunnelForMaskedLM(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : str , lowercase_ : Dict , lowercase_ : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFFunnelForSequenceClassification(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[str] , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFFunnelForMultipleChoice(config=lowercase_)
_UpperCamelCase = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
_UpperCamelCase = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
_UpperCamelCase = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
_UpperCamelCase = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCAmelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFFunnelForTokenClassification(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCAmelCase ( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = TFFunnelForQuestionAnswering(config=lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__A = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__A = False
__A = False
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_UpperCamelCase = TFFunnelModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_)
def __UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_)
def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@require_tf
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__A = False
__A = False
def __UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = TFFunnelModelTester(self , base=lowercase_)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_)
def __UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_)
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuid4()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input already existed: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input already existed: "{self.new_user_input}"; new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input.')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
_UpperCamelCase = self.tokenizer.eos_token
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
if min_length_for_response is not None:
_UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCamelCase = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method")
if hasattr(self.tokenizer , "_build_conversation_input_ids"):
_UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
_UpperCamelCase = torch.LongTensor([input_ids])
elif self.framework == "tf":
_UpperCamelCase = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
_UpperCamelCase = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_UpperCamelCase = max_length - minimum_tokens
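# keep only the most recent tokens so that at least `minimum_tokens` remain available for generation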
_UpperCamelCase = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
_UpperCamelCase = model_inputs.pop("conversation")
_UpperCamelCase = max_length
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
if self.model.config.is_encoder_decoder:
_UpperCamelCase = 1
else:
_UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs["output_ids"]
_UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
_UpperCamelCase = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.eos_token_id
_UpperCamelCase = []
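# concatenate every turn of the conversation, appending the EOS token after each turn when available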
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
_UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 82 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | def lowerCAmelCase__ ( a__ = 50 ) ->int:
'''simple docstring'''
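# Bottom-up DP (cf. Project Euler 117): ways_number[n] counts the tilings of a
# length-n row using unit squares plus tiles of length 2, 3 and 4.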
_UpperCamelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_UpperCamelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A red cat sitting on a park bench"
_UpperCamelCase = np.random.RandomState(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-2
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 1.5
_UpperCamelCase = int(factor * num_class_images )
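# over-fetch by a factor of 1.5 so that images which fail to download can be skipped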
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCamelCase = client.query(text=a__ )
if len(a__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
_UpperCamelCase = int(factor * num_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
f'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
_UpperCamelCase = class_images[count]
count += 1
try:
_UpperCamelCase = requests.get(images["url"] )
if img.status_code == 200:
_UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCamelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 0.0
for i, j in zip(lowercase_ , lowercase_):
n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_) else 0.0
_UpperCamelCase = n_correct / len(lowercase_)
return {
"accuracy": accuracy,
}
| 82 | 1 |
from math import factorial, radians
def lowerCAmelCase__ ( a__ , a__ = 18 , a__ = 10 ) ->float:
'''simple docstring'''
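# Maclaurin series: sin(x) = x - x^3/3! + x^5/5! - ..., truncated after the requested number of terms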
_UpperCamelCase = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
_UpperCamelCase = radians(a__ )
_UpperCamelCase = angle_in_radians
_UpperCamelCase = 3
_UpperCamelCase = -1
for _ in range(a__ ):
result += (b * (angle_in_radians**a)) / factorial(a )
_UpperCamelCase = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(a__ , a__ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
for i in range(0 , len(a__ ) , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(0 , a__ , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_UpperCamelCase = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_UpperCamelCase = generate_example_dataset(
os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(a__ ) )
_UpperCamelCase = func(a__ , **a__ )
print("shuffling dataset" )
_UpperCamelCase = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(a__ ) )
_UpperCamelCase = func(
a__ , **a__ )
with open(a__ , "wb" ) as f:
f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__ ( a__ , a__ ) ->List[Any]:
'''simple docstring'''
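# translate one key of the original EfficientFormer checkpoint into the Hugging Face naming scheme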
_UpperCamelCase = old_name
if "patch_embed" in old_name:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = old_name.split("." )
if layer == "0":
_UpperCamelCase = old_name.replace("0" , "convolution1" )
elif layer == "1":
_UpperCamelCase = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
_UpperCamelCase = old_name.replace("3" , "convolution2" )
else:
_UpperCamelCase = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , a__ ):
_UpperCamelCase = r"\b\d{2}\b"
if bool(re.search(a__ , a__ ) ):
_UpperCamelCase = re.search(r"\d\.\d\d." , a__ ).group()
else:
_UpperCamelCase = re.search(r"\d\.\d." , a__ ).group()
if int(match[0] ) < 6:
_UpperCamelCase = old_name.replace(a__ , "" )
_UpperCamelCase = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
_UpperCamelCase = "intermediate_stages." + trimmed_name
else:
_UpperCamelCase = old_name.replace(a__ , "" )
if int(match[2] ) < num_meta4D_last_stage:
_UpperCamelCase = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
_UpperCamelCase = str(int(match[2] ) - num_meta4D_last_stage )
_UpperCamelCase = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
_UpperCamelCase = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
_UpperCamelCase = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
_UpperCamelCase = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
_UpperCamelCase = trimmed_name.replace("fc2" , "linear_out" )
_UpperCamelCase = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , a__ ):
_UpperCamelCase = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
_UpperCamelCase = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_UpperCamelCase = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_UpperCamelCase = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
_UpperCamelCase = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
_UpperCamelCase = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
_UpperCamelCase = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
_UpperCamelCase = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_UpperCamelCase = new_name.replace("norm" , "layernorm" )
_UpperCamelCase = "efficientformer." + new_name
else:
_UpperCamelCase = "efficientformer.encoder." + new_name
return new_name
def lowerCAmelCase__ ( a__ , a__ ) ->List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
_UpperCamelCase = checkpoint.pop(a__ )
_UpperCamelCase = val
return checkpoint
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = torch.load(a__ , map_location="cpu" )["model"]
_UpperCamelCase = EfficientFormerConfig.from_json_file(a__ )
_UpperCamelCase = EfficientFormerForImageClassificationWithTeacher(a__ )
_UpperCamelCase = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
_UpperCamelCase = config.depths[-1] - config.num_meta3d_blocks + 1
_UpperCamelCase = convert_torch_checkpoint(a__ , a__ )
model.load_state_dict(a__ )
model.eval()
_UpperCamelCase = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
_UpperCamelCase = prepare_img()
_UpperCamelCase = 256
_UpperCamelCase = 224
_UpperCamelCase = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
_UpperCamelCase = processor(images=a__ , return_tensors="pt" ).pixel_values
# original processing pipeline
_UpperCamelCase = Compose(
[
Resize(a__ , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(a__ ),
ToTensor(),
Normalize(a__ , a__ ),
] )
_UpperCamelCase = image_transforms(a__ ).unsqueeze(0 )
assert torch.allclose(a__ , a__ )
_UpperCamelCase = model(a__ )
_UpperCamelCase = outputs.logits
_UpperCamelCase = (1, 1_000)
if "l1" in model_name:
_UpperCamelCase = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , a__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_UpperCamelCase = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , a__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_UpperCamelCase = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7' )
# Save Checkpoints
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(a__ )
print(f'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message="Add model" , use_temp_dir=a__ , )
processor.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message="Add image processor" , use_temp_dir=a__ , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-celebahq-256"
_UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
import argparse
import os
import re
lowerCamelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def lowerCAmelCase__ ( a__ ) ->Optional[Any]:
'''simple docstring'''
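    # Return the leading whitespace of the given line (empty string if the line is blank).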
_UpperCamelCase = _re_indent.search(a__ )
return "" if search is None else search.groups()[0]
def lowerCAmelCase__ ( a__ , a__="" , a__=None , a__=None ) ->Dict:
'''simple docstring'''
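    # Split the given code into blocks at one indentation level, optionally bounded by start/end prompts.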
_UpperCamelCase = 0
_UpperCamelCase = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
_UpperCamelCase = ["\n".join(lines[:index] )]
else:
_UpperCamelCase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_UpperCamelCase = [lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(a__ ) )
if index < len(a__ ) - 1:
_UpperCamelCase = [lines[index + 1]]
index += 1
else:
_UpperCamelCase = []
else:
blocks.append("\n".join(a__ ) )
_UpperCamelCase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append("\n".join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
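    # Wrap the given key function so comparisons ignore casing and underscores.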
def _inner(a__ ):
return key(a__ ).lower().replace("_" , "" )
return _inner
def lowerCAmelCase__ ( a__ , a__=None ) ->Union[str, Any]:
'''simple docstring'''
    def noop(a__ ):
        return a__
if key is None:
_UpperCamelCase = noop
# Constants are all uppercase, they go first.
_UpperCamelCase = [obj for obj in objects if key(a__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_UpperCamelCase = [obj for obj in objects if key(a__ )[0].isupper() and not key(a__ ).isupper()]
# Functions begin with a lowercase, they go last.
_UpperCamelCase = [obj for obj in objects if not key(a__ )[0].isupper()]
_UpperCamelCase = ignore_underscore(a__ )
return sorted(a__ , key=a__ ) + sorted(a__ , key=a__ ) + sorted(a__ , key=a__ )
def lowerCAmelCase__ ( a__ ) ->List[Any]:
'''simple docstring'''
def _replace(a__ ):
_UpperCamelCase = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_UpperCamelCase = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_UpperCamelCase = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(a__ )] ) + "]"
_UpperCamelCase = import_statement.split("\n" )
if len(a__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_UpperCamelCase = 2 if lines[1].strip() == "[" else 1
        _UpperCamelCase = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _UpperCamelCase = sort_objects(a__ , key=lambda a__ : a__[1] )
_UpperCamelCase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(a__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_UpperCamelCase = _re_bracket_content.sub(_replace , lines[1] )
else:
_UpperCamelCase = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_UpperCamelCase = keys[:-1]
_UpperCamelCase = get_indent(lines[1] ) + ", ".join([f'"{k}"' for k in sort_objects(a__ )] )
return "\n".join(a__ )
else:
# Finally we have to deal with imports fitting on one line
_UpperCamelCase = _re_bracket_content.sub(_replace , a__ )
return import_statement
def lowerCAmelCase__ ( a__ , a__=True ) ->Dict:
'''simple docstring'''
with open(a__ , "r" ) as f:
_UpperCamelCase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_UpperCamelCase = split_code_in_indented_blocks(
a__ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(a__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_UpperCamelCase = main_blocks[block_idx]
_UpperCamelCase = block.split("\n" )
# Get to the start of the imports.
_UpperCamelCase = 0
while line_idx < len(a__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_UpperCamelCase = len(a__ )
else:
line_idx += 1
if line_idx >= len(a__ ):
continue
        # Ignore the first and last lines: they don't contain anything.
_UpperCamelCase = "\n".join(block_lines[line_idx:-1] )
_UpperCamelCase = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_UpperCamelCase = split_code_in_indented_blocks(a__ , indent_level=a__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_UpperCamelCase = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        _UpperCamelCase = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_UpperCamelCase = [(i, key) for i, key in enumerate(a__ ) if key is not None]
        _UpperCamelCase = [x[0] for x in sorted(a__ , key=lambda a__ : a__[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_UpperCamelCase = 0
_UpperCamelCase = []
for i in range(len(a__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_UpperCamelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(a__ )
count += 1
# And we put our main block back together with its first and last line.
_UpperCamelCase = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(a__ ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(a__ , "w" ) as f:
f.write("\n".join(a__ ) )
def lowerCAmelCase__ ( a__=True ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = []
    for root, _, files in os.walk("src/diffusers" ):
if "__init__.py" in files:
_UpperCamelCase = sort_imports(os.path.join(a__ , "__init__.py" ) , check_only=a__ )
if result:
_UpperCamelCase = [os.path.join(a__ , "__init__.py" )]
if len(a__ ) > 0:
raise ValueError(f'Would overwrite {len(a__ )} files, run `make style`.' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCamelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
        # If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so we can't output more
        # results than the number of unique targets
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->list:
'''simple docstring'''
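    # Build the first n terms of the harmonic series as strings: "1", "1/2", ..., "1/n".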
if n_term == "":
return []
_UpperCamelCase = []
for temp in range(int(a__ ) ):
series.append(f'1/{temp + 1}' if series else "1" )
return series
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
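    # Dijkstra's two-stack algorithm: operands and operators go on separate stacks; a ")" pops and evaluates.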
    for i in a__:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_right = operand_stack.peek()
            operand_stack.pop()
            num_left = operand_stack.peek()
            operand_stack.pop()
            _UpperCamelCase = operators[opr](num_left , num_right )
            operand_stack.push(_UpperCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = (CMStochasticIterativeScheduler,)
__A = 10
def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> int:
"""simple docstring"""
_UpperCamelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
config.update(**lowercase_)
return config
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = 10
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = self.scheduler_classes[0](**lowercase_)
scheduler.set_timesteps(lowercase_)
_UpperCamelCase = scheduler.timesteps[0]
_UpperCamelCase = scheduler.timesteps[1]
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = 1
scheduler.set_timesteps(lowercase_)
_UpperCamelCase = scheduler.timesteps
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowercase_):
# 1. scale model input
_UpperCamelCase = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
_UpperCamelCase = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(lowercase_))
_UpperCamelCase = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_92.76_14) < 1e-2
assert abs(result_mean.item() - 0.25_10) < 1e-3
def __UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [106, 0]
scheduler.set_timesteps(timesteps=lowercase_)
_UpperCamelCase = scheduler.timesteps
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_UpperCamelCase = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
_UpperCamelCase = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(lowercase_))
_UpperCamelCase = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 3_47.63_57) < 1e-2
assert abs(result_mean.item() - 0.45_27) < 1e-3
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [39, 30, 12, 15, 0]
with self.assertRaises(lowercase_ , msg="`timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [39, 30, 12, 1, 0]
_UpperCamelCase = len(lowercase_)
with self.assertRaises(lowercase_ , msg="Can only pass one of `num_inference_steps` or `timesteps`."):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=lowercase_)
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''bertabs'''
def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = max_pos
_UpperCamelCase = enc_layers
_UpperCamelCase = enc_hidden_size
_UpperCamelCase = enc_heads
_UpperCamelCase = enc_ff_size
_UpperCamelCase = enc_dropout
_UpperCamelCase = dec_layers
_UpperCamelCase = dec_hidden_size
_UpperCamelCase = dec_heads
_UpperCamelCase = dec_ff_size
_UpperCamelCase = dec_dropout
| 82 | 1 |
import os
from distutils.util import strtobool
def lowerCAmelCase__ ( a__ , a__ ) ->str:
'''simple docstring'''
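    # Return the first non-negative integer found among the given environment variables, else the default.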
for e in env_keys:
        _UpperCamelCase = int(os.environ.get(e , -1 ) )
if val >= 0:
return val
return default
def lowerCAmelCase__ ( a__ , a__=False ) ->str:
'''simple docstring'''
_UpperCamelCase = os.environ.get(a__ , str(a__ ) )
return strtobool(a__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowerCAmelCase__ ( a__ , a__="no" ) ->int:
'''simple docstring'''
_UpperCamelCase = os.environ.get(a__ , str(a__ ) )
return value
| 82 | from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->bool:
'''simple docstring'''
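    # A valid IPv4 address here is exactly four dot-separated numeric octets, each within the checked range.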
    _UpperCamelCase = [int(i ) for i in a__.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 254 for octet in octets )
if __name__ == "__main__":
lowerCamelCase__ = input().strip()
lowerCamelCase__ = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(F"{ip} is a {valid_or_invalid} IP v4 address.")
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 82 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase__ = 2
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , *, # begin keyword-only arguments
lowercase_ : Any="<s>" , lowercase_ : List[str]="<pad>" , lowercase_ : Dict="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : List[str]=None , ) -> Dict:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = bos, unk, pad, eos
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = {}
_UpperCamelCase = self.add_symbol(lowercase_)
_UpperCamelCase = self.add_symbol(lowercase_)
_UpperCamelCase = self.add_symbol(lowercase_)
_UpperCamelCase = self.add_symbol(lowercase_)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowercase_)
_UpperCamelCase = len(self.symbols)
def __eq__( self : Tuple , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
        return self.indices == lowercase_.indices
def __getitem__( self : int , lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
        if lowercase_ < len(self.symbols):
            return self.symbols[lowercase_]
return self.unk_word
def __len__( self : List[str]) -> Any:
"""simple docstring"""
return len(self.symbols)
def __contains__( self : Union[str, Any] , lowercase_ : Union[str, Any]) -> Dict:
"""simple docstring"""
        return lowercase_ in self.indices
@classmethod
def __UpperCAmelCase ( cls : Dict , lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = cls()
d.add_from_file(lowercase_)
return d
def __UpperCAmelCase ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any]=1 , lowercase_ : List[Any]=False) -> List[str]:
"""simple docstring"""
if word in self.indices and not overwrite:
_UpperCamelCase = self.indices[word]
_UpperCamelCase = self.count[idx] + n
return idx
else:
_UpperCamelCase = len(self.symbols)
_UpperCamelCase = idx
self.symbols.append(lowercase_)
self.count.append(lowercase_)
return idx
def __UpperCAmelCase ( self : Any , lowercase_ : List[Any]) -> int:
"""simple docstring"""
return 0
def __UpperCAmelCase ( self : int , lowercase_ : List[Any]) -> List[str]:
"""simple docstring"""
        if isinstance(lowercase_ , str):
            try:
                with open(lowercase_ , "r" , encoding="utf-8") as fd:
                    self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(lowercase_))
return
        _UpperCamelCase = lowercase_.readlines()
        _UpperCamelCase = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
_UpperCamelCase , _UpperCamelCase = line.rstrip().rsplit(" " , 1)
if field == "#fairseq:overwrite":
_UpperCamelCase = True
_UpperCamelCase , _UpperCamelCase = line.rsplit(" " , 1)
else:
_UpperCamelCase = False
                _UpperCamelCase = int(field)
_UpperCamelCase = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(lowercase_))
self.add_symbol(lowercase_ , n=lowercase_ , overwrite=lowercase_)
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def lowerCAmelCase__ ( a__ ) ->List[str]:
'''simple docstring'''
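    # fairseq marks word-internal BPE tokens with a trailing "@@"; strip that marker and tag word-final tokens with "</w>".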
_UpperCamelCase = dict((re.sub(r"@@$" , "" , a__ ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , a__ ), v) for k, v in d.items() )
_UpperCamelCase = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
_UpperCamelCase = d[k] # restore
return da
def lowerCAmelCase__ ( a__ , a__ ) ->str:
'''simple docstring'''
if not os.path.exists(a__ ):
raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(a__ , exist_ok=a__ )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
_UpperCamelCase = os.path.join(a__ , "checkpoint.pt" )
if not os.path.isfile(a__ ):
raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
_UpperCamelCase = torch.load(a__ , map_location="cpu" )
_UpperCamelCase = chkpt["cfg"]["model"]
# dicts
_UpperCamelCase = os.path.join(a__ , "dict.txt" )
if not os.path.isfile(a__ ):
raise ValueError(f'path to the file {dict_file} does not exist!' )
_UpperCamelCase = Dictionary.load(a__ )
_UpperCamelCase = rewrite_dict_keys(src_dict.indices )
_UpperCamelCase = len(a__ )
_UpperCamelCase = os.path.join(a__ , VOCAB_FILES_NAMES["vocab_file"] )
print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# merges_file (bpecodes)
_UpperCamelCase = os.path.join(a__ , "bpecodes" )
if not os.path.isfile(a__ ):
raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
_UpperCamelCase = os.path.join(a__ , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(a__ , a__ )
# model config
_UpperCamelCase = os.path.join(a__ , "config.json" )
_UpperCamelCase = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1e-1_2,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# tokenizer config
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1_024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(f'Generating {biogpt_tokenizer_config_file}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) )
# model
_UpperCamelCase = chkpt["model"]
# remove unneeded keys
_UpperCamelCase = [
"decoder.version",
]
for k in ignore_keys:
        model_state_dict.pop(k , None )
_UpperCamelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
            _UpperCamelCase = model_state_dict.pop(layer_name )
else:
            _UpperCamelCase = model_state_dict.pop(layer_name )
_UpperCamelCase = BioGptConfig.from_pretrained(a__ )
_UpperCamelCase = BioGptForCausalLM(a__ )
# check that it loads ok
model_new.load_state_dict(a__ )
# save
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(a__ , a__ )
print("Conversion is done!" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
def lowerCAmelCase__ ( a__ , a__ ) ->bool:
'''simple docstring'''
_UpperCamelCase = len(a__ )
_UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
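    # subset[i][j] is True when some subset of the first i elements sums exactly to j.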
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # a nonzero sum cannot be formed from the empty set, hence False
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__ ( a__ , a__ , a__ , a__=None ) ->Optional[Any]:
'''simple docstring'''
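    # Choose the task-specific XLNet head (GLUE classification, SQuAD-style QA, or plain LM) before loading TF weights.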
_UpperCamelCase = XLNetConfig.from_json_file(a__ )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(a__ )
else:
_UpperCamelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 | 1 |
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCamelCase__ = ord(str(i))
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
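    # On Windows, read raw keystrokes via msvcrt; on POSIX, temporarily switch the terminal to raw mode.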
if os.name == "nt":
import msvcrt
_UpperCamelCase = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
_UpperCamelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_UpperCamelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_UpperCamelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(a__ )
if ord(a__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_UpperCamelCase = chr(KEYMAP["esc"] )
except KeyError:
_UpperCamelCase = cha[1]
else:
_UpperCamelCase = ch.decode(a__ )
else:
_UpperCamelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_UpperCamelCase = sys.stdin.fileno()
_UpperCamelCase = termios.tcgetattr(a__ )
try:
tty.setraw(a__ )
_UpperCamelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(a__ , termios.TCSADRAIN , a__ )
return ch
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = get_raw_chars()
if ord(a__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(a__ ) == KEYMAP["esc"]:
_UpperCamelCase = get_raw_chars()
if ord(a__ ) == KEYMAP["mod_int"]:
_UpperCamelCase = get_raw_chars()
if ord(a__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(a__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(a__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple) -> int:
"""simple docstring"""
super().__init__()
_UpperCamelCase = model
_UpperCamelCase = 2
_UpperCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->str:
'''simple docstring'''
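    # Copy the Lightning checkpoint's weights into a Hugging Face LongformerForQuestionAnswering and save it.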
_UpperCamelCase = LongformerModel.from_pretrained(a__ )
_UpperCamelCase = LightningModel(a__ )
_UpperCamelCase = torch.load(a__ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_UpperCamelCase = LongformerForQuestionAnswering.from_pretrained(a__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__ )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 82 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCAmelCase__ ( a__ ) ->bool:
'''simple docstring'''
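    # Perfect-square check via the integer square root.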
_UpperCamelCase = int(number**0.5 )
return number == sq * sq
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ ) ->tuple[int, int]:
'''simple docstring'''
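    # Add the three fractions and reduce the resulting numerator/denominator by their gcd.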
_UpperCamelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCamelCase = x_den * y_den * z_den
_UpperCamelCase = gcd(a__ , a__ )
top //= hcf
bottom //= hcf
return top, bottom
def lowerCAmelCase__ ( a__ = 35 ) ->int:
'''simple docstring'''
_UpperCamelCase = set()
_UpperCamelCase = 42
_UpperCamelCase = Fraction(0 )
_UpperCamelCase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_UpperCamelCase = x_num * y_den + x_den * y_num
_UpperCamelCase = x_den * y_den
_UpperCamelCase = gcd(a__ , a__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCamelCase = add_three(
a__ , a__ , a__ , a__ , a__ , a__ )
unique_s.add(a__ )
# n=2
_UpperCamelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_UpperCamelCase = x_den * x_den * y_den * y_den
if is_sq(a__ ) and is_sq(a__ ):
_UpperCamelCase = int(sqrt(a__ ) )
_UpperCamelCase = int(sqrt(a__ ) )
_UpperCamelCase = gcd(a__ , a__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCamelCase = add_three(
a__ , a__ , a__ , a__ , a__ , a__ )
unique_s.add(a__ )
# n=-1
_UpperCamelCase = x_num * y_num
_UpperCamelCase = x_den * y_num + x_num * y_den
_UpperCamelCase = gcd(a__ , a__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCamelCase = add_three(
a__ , a__ , a__ , a__ , a__ , a__ )
unique_s.add(a__ )
# n=2
_UpperCamelCase = x_num * x_num * y_num * y_num
_UpperCamelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(a__ ) and is_sq(a__ ):
_UpperCamelCase = int(sqrt(a__ ) )
_UpperCamelCase = int(sqrt(a__ ) )
_UpperCamelCase = gcd(a__ , a__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCamelCase = add_three(
a__ , a__ , a__ , a__ , a__ , a__ )
unique_s.add(a__ )
for num, den in unique_s:
total += Fraction(a__ , a__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 82 | 1 |
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = {}
def __UpperCAmelCase ( self : int) -> None:
"""simple docstring"""
print(self.vertex)
for i in self.vertex:
print(lowercase_ , " -> " , " -> ".join([str(lowercase_) for j in self.vertex[i]]))
def __UpperCAmelCase ( self : Any , lowercase_ : int , lowercase_ : int) -> None:
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(lowercase_)
else:
# else make a new vertex
_UpperCamelCase = [to_vertex]
def __UpperCAmelCase ( self : List[str]) -> None:
"""simple docstring"""
_UpperCamelCase = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
                self.dfs_recursive(i , visited)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : list) -> None:
"""simple docstring"""
_UpperCamelCase = True
print(lowercase_ , end=" ")
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_)
if __name__ == "__main__":
lowerCamelCase__ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
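# A compact, runnable reference for the recursive DFS above (function and
# variable names are assumed, not the class's own):
def dfs_reference(graph , start , visited=None):
    """Return vertices in depth-first preorder over an adjacency-list dict."""
    if visited is None:
        visited = set()
    visited.add(start)
    order = [start]
    for neighbour in graph.get(start , []):
        if neighbour not in visited:
            order.extend(dfs_reference(graph , neighbour , visited))
    return order
# Same edges as the demo graph above, same DFS order as the documented output:
assert dfs_reference({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]} , 0) == [0, 1, 2, 3]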
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
raise ValueError(
                f' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # "token_type_ids" is produced by tokenizers but is not a valid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
            isinstance(args[0] , list)
            and all(isinstance(el , str) for el in args[0])
            and all(len(res) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.')
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''translation'''
def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"increasing your max_length manually, e.g. translator('...', max_length=400)")
return True
def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
if src_lang is not None:
_UpperCamelCase = src_lang
if tgt_lang is not None:
_UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility: direct argument use is preferred.
_UpperCamelCase = kwargs.get("task" , self.task)
_UpperCamelCase = task.split("_")
if task and len(lowercase_) == 4:
# translation, XX, to YY
_UpperCamelCase = items[1]
_UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
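# Hedged usage sketch of the summarization and translation pipelines defined
# above; model selection is left to the task defaults, and the output keys
# follow the pattern return_name + "_text":
from transformers import pipeline
summarizer = pipeline("summarization")
translator = pipeline("translation_en_to_fr")
print(summarizer("A very long article ..." , max_length=60)[0]["summary_text"])
print(translator("How are you?")[0]["translation_text"])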
| 82 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
lowerCamelCase__ = 6378137.0
lowerCamelCase__ = 6356752.314245
lowerCamelCase__ = 637_8137
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->float:
'''simple docstring'''
_UpperCamelCase = (AXIS_A - AXIS_B) / AXIS_A
_UpperCamelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCamelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCamelCase = radians(a__ )
_UpperCamelCase = radians(a__ )
# Equation
_UpperCamelCase = sin((phi_a - phi_a) / 2 )
_UpperCamelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_UpperCamelCase = sqrt(sin_sq_phi + (cos(a__ ) * cos(a__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
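# Readable reference for the haversine computed above (local names assumed;
# the snippet's identifiers are collapsed). Returns metres between two
# (latitude, longitude) points given in degrees.
from math import asin, atan, cos, radians, sin, sqrt, tan
def haversine_sketch(lat1 , lon1 , lat2 , lon2):
    flattening = (6378137.0 - 6356752.314245) / 6378137.0
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1, lambda_2 = radians(lon1), radians(lon2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2) ** 2
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2) ** 2
    h_value = sqrt(sin_sq_phi + cos(phi_1) * cos(phi_2) * sin_sq_lambda)
    return 2 * 6378137 * asin(h_value)
# San Francisco to Yosemite Valley is roughly 254 km:
assert 250_000 < haversine_sketch(37.774856 , -122.424227 , 37.864742 , -119.537521) < 260_000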
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
_UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_UpperCamelCase = legacy
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = extra_ids
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = text.startswith(lowercase_)
if is_first:
_UpperCamelCase = text[1:]
_UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
_UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if token.startswith("<extra_id_"):
_UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
_UpperCamelCase = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
else:
_UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
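# Hedged usage sketch of the sentencepiece-backed tokenizer above (requires
# the `sentencepiece` package and network access for the t5-small vocab):
from transformers import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("t5-small")
encoded = tokenizer("translate English to German: Hello" , return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(encoded.input_ids[0]))  # sentencepiece pieces, ending in </s>
print(tokenizer.decode(encoded.input_ids[0] , skip_special_tokens=True))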
| 82 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
_UpperCamelCase = input_file.read()
_UpperCamelCase = regexp.search(lowercase_)
return match
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
_UpperCamelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase = regexp.finditer(lowercase_)
_UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowercase_)):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(lowercase_)):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
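# Quick illustration of the encoding regex used above: `open(...)` without an
# explicit encoding (or a binary/write mode keyword) is flagged, an encoded
# open is not.
import re
_pattern = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert _pattern.search(' open("data.txt")') is not None
assert _pattern.search(' open("data.txt", encoding="utf-8")') is None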
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
    def is_public(name ) -> bool:
        return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
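# Sketch of the (fun, *args) operation tuples driving both maps above: each
# tuple is an operator-module callable plus its arguments, applied to the
# plain dict and to the HashMap alike so their behaviour can be compared.
from operator import delitem, getitem, setitem
_reference = {}
for _fun, *_args in [(setitem, "key_a", "val_a"), (delitem, "key_a")]:
    _fun(_reference , *_args)
assert _reference == {}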
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = [1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0, 0, 0
_UpperCamelCase = ugly_nums[ia] * 2
_UpperCamelCase = ugly_nums[ia] * 3
_UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , a__ ):
_UpperCamelCase = min(a__ , a__ , a__ )
ugly_nums.append(a__ )
if next_num == next_a:
ia += 1
_UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
_UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
_UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 82 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
            # Out channels is double the in channels because the model predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
import math
def lowerCAmelCase__ ( a__ , a__ ) ->float:
'''simple docstring'''
return math.pow(a__ , 2 ) - a
def lowerCAmelCase__ ( a__ ) ->float:
'''simple docstring'''
return 2 * x
def lowerCAmelCase__ ( a__ ) ->float:
'''simple docstring'''
_UpperCamelCase = 2.0
while start <= a:
_UpperCamelCase = math.pow(a__ , 2 )
return start
def lowerCAmelCase__ ( a__ , a__ = 9_999 , a__ = 0.00000000000001 ) ->float:
'''simple docstring'''
if a < 0:
raise ValueError("math domain error" )
_UpperCamelCase = get_initial_point(a__ )
for _ in range(a__ ):
_UpperCamelCase = value
_UpperCamelCase = value - fx(a__ , a__ ) / fx_derivative(a__ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 82 | def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and number_of_steps > 0
    ), f'number_of_steps needs to be a positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
_UpperCamelCase , _UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
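# The iterative pair update above computes Fibonacci-style step counts:
# ways(1)=1, ways(2)=2, ways(n)=ways(n-1)+ways(n-2). Quick sanity sketch:
def climb_stairs_sketch(n ):
    current, previous = 1, 1
    for _ in range(n - 1):
        current, previous = current + previous, current
    return current
assert [climb_stairs_sketch(n) for n in range(1 , 7)] == [1, 2, 3, 5, 8, 13]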
| 82 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCAmelCase__ ( ) ->Any:
'''simple docstring'''
_UpperCamelCase = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
_UpperCamelCase = parser.add_subparsers(help="diffusers-cli command helpers" )
# Register commands
EnvironmentCommand.register_subcommand(a__ )
# Let's go
_UpperCamelCase = parser.parse_args()
if not hasattr(a__ , "func" ):
parser.print_help()
exit(1 )
# Run
_UpperCamelCase = args.func(a__ )
service.run()
if __name__ == "__main__":
main()
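# The parser above backs the console entry point; a typical invocation is
#   diffusers-cli env
# which dispatches to EnvironmentCommand and prints the local environment info.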
| 82 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
super().__init__(**lowercase_)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , "vision")
self.check_model_type(lowercase_)
def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCamelCase = kwargs.pop("text_queries")
if isinstance(lowercase_ , (str, Image.Image)):
_UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
else:
_UpperCamelCase = image
_UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
return results
def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {}
if "threshold" in kwargs:
_UpperCamelCase = kwargs["threshold"]
if "top_k" in kwargs:
_UpperCamelCase = kwargs["top_k"]
return {}, {}, postprocess_params
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
"""simple docstring"""
_UpperCamelCase = load_image(inputs["image"])
_UpperCamelCase = inputs["candidate_labels"]
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = candidate_labels.split(",")
_UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(lowercase_):
_UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
_UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
yield {
"is_last": i == len(lowercase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = model_inputs.pop("target_size")
_UpperCamelCase = model_inputs.pop("candidate_label")
_UpperCamelCase = model_inputs.pop("is_last")
_UpperCamelCase = self.model(**lowercase_)
_UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
for model_output in model_outputs:
_UpperCamelCase = model_output["candidate_label"]
_UpperCamelCase = BaseModelOutput(lowercase_)
_UpperCamelCase = self.image_processor.post_process_object_detection(
outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
_UpperCamelCase = outputs["scores"][index].item()
_UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
_UpperCamelCase = {"score": score, "label": label, "box": box}
results.append(lowercase_)
        _UpperCamelCase = sorted(lowercase_ , key=lambda x: x["score"] , reverse=lowercase_)
if top_k:
_UpperCamelCase = results[:top_k]
return results
def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
_UpperCamelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
    if isinstance(a__ , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(a__ , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_UpperCamelCase = False
if num < 0:
_UpperCamelCase = True
_UpperCamelCase = -num
_UpperCamelCase = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(a__ ) for e in binary )
return "0b" + "".join(str(a__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
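# The hand-rolled loop above should agree with Python's built-in bin(),
# including the sign handling:
assert bin(0) == "0b0" and bin(5) == "0b101" and bin(-37) == "-0b100101"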
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
_UpperCamelCase = input_file.read()
_UpperCamelCase = regexp.search(lowercase_)
return match
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
_UpperCamelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase = regexp.finditer(lowercase_)
_UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowercase_)):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(lowercase_)):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = VideoToVideoSDPipeline
__A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
__A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
__A = PipelineTesterMixin.required_optional_params - {'''latents'''}
__A = False
# No `output_type`.
__A = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_UpperCamelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0)
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
_UpperCamelCase = CLIPTextModel(lowercase_)
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Union[str, Any]=0) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = VideoToVideoSDPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = "np"
_UpperCamelCase = sd_pipe(**lowercase_).frames
_UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_ , expected_max_diff=5e-3)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def __UpperCAmelCase ( self : List[str]) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def __UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa)
pipe.enable_model_cpu_offload()
# 10 frames
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase = torch.randn((1, 10, 3, 1024, 576) , generator=lowercase_)
_UpperCamelCase = video.to("cuda")
_UpperCamelCase = "Spiderman is surfing"
_UpperCamelCase = pipe(lowercase_ , video=lowercase_ , generator=lowercase_ , num_inference_steps=3 , output_type="pt").frames
_UpperCamelCase = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56])
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
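# Condensed sketch of the slow test above (a GPU and the same checkpoint are
# assumed; the tensor layout is (batch, frames, channels, height, width)):
import torch
from diffusers import VideoToVideoSDPipeline
pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
video = torch.randn(1 , 10 , 3 , 1024 , 576)
frames = pipe("Spiderman is surfing" , video=video , num_inference_steps=3 , output_type="pt").frames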
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
_UpperCamelCase = self.tokenizer.eos_token
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
if min_length_for_response is not None:
_UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCamelCase = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method")
if hasattr(self.tokenizer , "_build_conversation_input_ids"):
_UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
_UpperCamelCase = torch.LongTensor([input_ids])
elif self.framework == "tf":
_UpperCamelCase = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
_UpperCamelCase = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_UpperCamelCase = max_length - minimum_tokens
_UpperCamelCase = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
_UpperCamelCase = model_inputs.pop("conversation")
_UpperCamelCase = max_length
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
if self.model.config.is_encoder_decoder:
_UpperCamelCase = 1
else:
_UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs["output_ids"]
_UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
_UpperCamelCase = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.eos_token_id
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
_UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
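# Hedged usage sketch of the Conversation/pipeline pair above (the model
# choice is an assumption; any conversational checkpoint works):
from transformers import Conversation, pipeline
chatbot = pipeline("conversational" , model="microsoft/DialoGPT-medium")
conversation = Conversation("Can you recommend a movie for tonight?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])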
| 82 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = CodeGenTokenizer
__A = CodeGenTokenizerFast
__A = True
__A = {'''add_prefix_space''': True}
__A = False
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase = dict(zip(lowercase_ , range(len(lowercase_))))
_UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase = {"unk_token": "<unk>"}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowercase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(lowercase_))
def __UpperCAmelCase ( self : Tuple , **lowercase_ : Any) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def __UpperCAmelCase ( self : Tuple , **lowercase_ : Tuple) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_)
def __UpperCAmelCase ( self : Any , lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = "lower newer"
_UpperCamelCase = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
_UpperCamelCase = "lower newer"
_UpperCamelCase = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
_UpperCamelCase = tokens + [tokenizer.unk_token]
_UpperCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer(add_prefix_space=lowercase_)
_UpperCamelCase = "lower newer"
# Testing tokenization
_UpperCamelCase = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_)
_UpperCamelCase = rust_tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
# Testing conversion to ids without special tokens
_UpperCamelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_)
_UpperCamelCase = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
# Testing conversion to ids with special tokens
_UpperCamelCase = self.get_rust_tokenizer(add_prefix_space=lowercase_)
_UpperCamelCase = tokenizer.encode(lowercase_ , add_prefix_space=lowercase_)
_UpperCamelCase = rust_tokenizer.encode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
# Testing the unknown token
_UpperCamelCase = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_) , lowercase_)
def __UpperCAmelCase ( self : str , *lowercase_ : int , **lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[int]=15) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_)
# Simple input
_UpperCamelCase = "This is a simple input"
_UpperCamelCase = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase = ("This is a simple input", "This is a pair")
_UpperCamelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length")
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length")
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length")
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length")
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>")
# Simple input
_UpperCamelCase = "This is a simple input"
_UpperCamelCase = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase = ("This is a simple input", "This is a pair")
_UpperCamelCase = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer(lowercase_ , padding="max_length" , max_length=30 , return_tensors="np")
_UpperCamelCase = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="np")
_UpperCamelCase = tokenizer(*lowercase_ , padding="max_length" , max_length=60 , return_tensors="np")
_UpperCamelCase = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0])
self.assertFalse(0 in out_sa["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1])
self.assertTrue(0 in out_sa["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0])
self.assertFalse(0 in out_pa["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1])
self.assertTrue(0 in out_pa["attention_mask"][1])
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCamelCase = "$$$"
_UpperCamelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase_ , add_bos_token=lowercase_)
_UpperCamelCase = "This is a simple input"
_UpperCamelCase = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer(lowercase_)
_UpperCamelCase = tokenizer(lowercase_)
self.assertEqual(out_s.input_ids[0] , lowercase_)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
_UpperCamelCase = tokenizer.decode(out_s.input_ids)
_UpperCamelCase = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , lowercase_)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
_UpperCamelCase = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_UpperCamelCase = "\nif len_a > len_b: result = a\nelse: result = b"
_UpperCamelCase = tokenizer.encode(lowercase_)
_UpperCamelCase = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
_UpperCamelCase = tokenizer.decode(lowercase_ , truncate_before_pattern=lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
pass
| 82 | def lowerCAmelCase__ ( a__ = 50 ) ->int:
'''simple docstring'''
_UpperCamelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
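# The dynamic programme above satisfies f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)
# with f(0) = f(1) = 1: a row either ends in a unit square or in a tile of length
# 2, 3 or 4. For example f(2) = 2, f(3) = 4 and f(4) = 8, which matches what the
# triple loop computes (this appears to be the tiling count from Project Euler
# problem 117).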
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCamelCase = 1
_UpperCamelCase = 3
_UpperCamelCase = (32, 32)
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowercase_)
return image
@property
def __UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(lowercase_)
@property
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
def extract(*lowercase_ : int , **lowercase_ : str):
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = torch.ones([0])
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
self.pixel_values.to(lowercase_)
return self
return Out()
return extract
def __UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet
_UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase_)
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
_UpperCamelCase = 77
_UpperCamelCase = self.dummy_image.to(lowercase_)
_UpperCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , )
_UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_)
_UpperCamelCase = alt_pipe.to(lowercase_)
alt_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(0)
_UpperCamelCase = alt_pipe(
[prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowercase_ , )
_UpperCamelCase = output.images
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(0)
_UpperCamelCase = alt_pipe(
[prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowercase_ , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.dummy_cond_unet
_UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase_)
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
_UpperCamelCase = 77
_UpperCamelCase = self.dummy_image.to(lowercase_)
# put models in fp16
_UpperCamelCase = unet.half()
_UpperCamelCase = vae.half()
_UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , )
_UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_)
_UpperCamelCase = alt_pipe.to(lowercase_)
alt_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = alt_pipe(
[prompt] , generator=lowercase_ , num_inference_steps=2 , output_type="np" , image=lowercase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def __UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCamelCase = init_image.resize((760, 504))
_UpperCamelCase = "BAAI/AltDiffusion"
_UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = "A fantasy landscape, trending on artstation"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , strength=0.75 , guidance_scale=7.5 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_UpperCamelCase = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
_UpperCamelCase = init_image.resize((768, 512))
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy")
_UpperCamelCase = "BAAI/AltDiffusion"
_UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = "A fantasy landscape, trending on artstation"
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , image=lowercase_ , strength=0.75 , guidance_scale=7.5 , generator=lowercase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1e-2
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 1.5
_UpperCamelCase = int(factor * num_class_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCamelCase = client.query(text=a__ )
if len(a__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
_UpperCamelCase = int(factor * num_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
f'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
_UpperCamelCase = class_images[count]
count += 1
try:
_UpperCamelCase = requests.get(images["url"] )
if img.status_code == 200:
_UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
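# The query loop above keeps growing the request size by `factor` until the kNN
# service returns enough candidates (or the request exceeds 1e4 images); the
# download loop then saves images until `num_class_images` files exist, logging
# one caption, source URL and local path per image in caption.txt, urls.txt and
# images.txt.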
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
from __future__ import annotations
import math
def lowerCAmelCase__ ( a__ ) ->bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
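# Every prime above 3 has the form 6k - 1 or 6k + 1 (6k, 6k + 2 and 6k + 4 are
# even, and 6k + 3 is divisible by 3), so after the explicit 2 and 3 checks it
# suffices to test divisors i and i + 2 for i = 5, 11, 17, ... up to
# sqrt(number); e.g. 25 is rejected at i = 5 and 49 at i + 2 = 7.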
lowerCamelCase__ = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def lowerCAmelCase__ ( a__ ) ->list[int]:
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
_UpperCamelCase = []
for num in range(len(a__ ) ):
_UpperCamelCase = 0
while 2 * i * i <= odd_composites[num]:
_UpperCamelCase = odd_composites[num] - 2 * i * i
if is_prime(a__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(a__ ) == n:
return list_nums
return []
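# This searches the odd composites for one that cannot be written as a prime plus
# twice a square, i.e. a counterexample to Goldbach's "other" conjecture
# (Project Euler problem 46); the first such number is 5777.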
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 0.0
for i, j in zip(lowercase_ , lowercase_):
n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_) else 0.0
_UpperCamelCase = n_correct / len(lowercase_)
return {
"accuracy": accuracy,
}
| 82 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
lowerCamelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
_UpperCamelCase = bs[:]
_UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(a__ )
cs.append(2**8 + n )
n += 1
_UpperCamelCase = [chr(a__ ) for n in cs]
return dict(zip(a__ , a__ ) )
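# bytes_to_unicode above sends each of the 256 byte values to a printable unicode
# character: the printable ASCII and Latin-1 ranges map to themselves, and the
# remaining bytes (controls, space, ...) are remapped to consecutive code points
# starting at U+0100, which is why a leading space byte (0x20) becomes "Ġ"
# (U+0120). The mapping is a bijection, so decoding can invert it exactly.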
def lowerCAmelCase__ ( a__ ) ->Any:
'''simple docstring'''
_UpperCamelCase = set()
_UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase = char
return pairs
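# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; the BPE loop below repeatedly
# merges the pair with the lowest merge rank until no ranked pair remains.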
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int="replace" , lowercase_ : Union[str, Any]="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Union[str, Any]="</s>" , lowercase_ : Union[str, Any]="<s>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : str="<mask>" , lowercase_ : Tuple=False , **lowercase_ : List[str] , ) -> str:
"""simple docstring"""
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else bos_token
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else eos_token
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else sep_token
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else cls_token
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else unk_token
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCamelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token
super().__init__(
errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8") as vocab_handle:
_UpperCamelCase = json.load(lowercase_)
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = errors # how to handle errors in decoding
_UpperCamelCase = bytes_to_unicode()
_UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase_ , encoding="utf-8") as merges_handle:
_UpperCamelCase = merges_handle.read().split("\n")[1:-1]
_UpperCamelCase = [tuple(merge.split()) for merge in bpe_merges]
_UpperCamelCase = dict(zip(lowercase_ , range(len(lowercase_))))
_UpperCamelCase = {}
_UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def __UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
return len(self.encoder)
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_UpperCamelCase = tuple(lowercase_)
_UpperCamelCase = get_pairs(lowercase_)
if not pairs:
return token
while True:
_UpperCamelCase = min(lowercase_ , key=lambda lowercase_: self.bpe_ranks.get(lowercase_ , float("inf")))
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(lowercase_):
try:
_UpperCamelCase = word.index(lowercase_ , lowercase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_UpperCamelCase = j
if word[i] == first and i < len(lowercase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCamelCase = tuple(lowercase_)
_UpperCamelCase = new_word
if len(lowercase_) == 1:
break
else:
_UpperCamelCase = get_pairs(lowercase_)
_UpperCamelCase = " ".join(lowercase_)
_UpperCamelCase = word
return word
def __UpperCAmelCase ( self : int , lowercase_ : Optional[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = []
for token in re.findall(self.pat , lowercase_):
_UpperCamelCase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase_).split(" "))
return bpe_tokens
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int]) -> Dict:
"""simple docstring"""
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token))
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[Any]) -> str:
"""simple docstring"""
return self.decoder.get(lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "".join(lowercase_)
_UpperCamelCase = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowercase_ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_) + "\n")
_UpperCamelCase = 0
with open(lowercase_ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!")
_UpperCamelCase = token_index
writer.write(" ".join(lowercase_) + "\n")
index += 1
return vocab_file, merge_file
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
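        # BART's format: a single sequence becomes "<s> A </s>" and a pair becomes
        # "<s> A </s></s> B </s>", which is exactly what the two branches above build.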
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is None:
return [1] + ([0] * len(lowercase_)) + [1]
return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
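        # BART does not use token type ids, so the returned mask is all zeros
        # whether one or two sequences are passed.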
def __UpperCAmelCase ( self : Dict , lowercase_ : List[str] , lowercase_ : int=False , **lowercase_ : str) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowercase_) > 0 and not text[0].isspace()):
_UpperCamelCase = " " + text
return (text, kwargs)
| 82 | | 82 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase__ = logging.getLogger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''token-classification'''
def __init__( self : str , lowercase_ : Any) -> str:
"""simple docstring"""
if type(lowercase_) == dict:
_UpperCamelCase = Namespace(**lowercase_)
_UpperCamelCase = import_module("tasks")
try:
_UpperCamelCase = getattr(lowercase_ , hparams.task_type)
_UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
_UpperCamelCase = self.token_classification_task.get_labels(hparams.labels)
_UpperCamelCase = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels) , self.mode)
def __UpperCAmelCase ( self : List[Any] , **lowercase_ : Optional[int]) -> Dict:
"""simple docstring"""
return self.model(**lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Union[str, Any] , lowercase_ : List[str]) -> int:
"""simple docstring"""
_UpperCamelCase = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
_UpperCamelCase = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
_UpperCamelCase = self(**lowercase_)
_UpperCamelCase = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCAmelCase ( self : List[str]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.hparams
for mode in ["train", "dev", "test"]:
_UpperCamelCase = self._feature_file(lowercase_)
if os.path.exists(lowercase_) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_)
_UpperCamelCase = torch.load(lowercase_)
else:
logger.info("Creating features from dataset file at %s" , args.data_dir)
_UpperCamelCase = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_)
_UpperCamelCase = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"]) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"]) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_)
torch.save(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False) -> DataLoader:
"""simple docstring"""
_UpperCamelCase = self._feature_file(lowercase_)
logger.info("Loading features from cached file %s" , lowercase_)
_UpperCamelCase = torch.load(lowercase_)
_UpperCamelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
_UpperCamelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
_UpperCamelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
_UpperCamelCase = torch.tensor([0 for f in features] , dtype=torch.long)
            # HACK(this placeholder will be removed soon)
_UpperCamelCase = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_) , batch_size=lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : Tuple , lowercase_ : str) -> Dict:
"""simple docstring"""
"""Compute validation""" ""
_UpperCamelCase = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
_UpperCamelCase = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
_UpperCamelCase = self(**lowercase_)
_UpperCamelCase , _UpperCamelCase = outputs[:2]
_UpperCamelCase = logits.detach().cpu().numpy()
_UpperCamelCase = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = torch.stack([x["val_loss"] for x in outputs]).mean()
_UpperCamelCase = np.concatenate([x["pred"] for x in outputs] , axis=0)
_UpperCamelCase = np.argmax(lowercase_ , axis=2)
_UpperCamelCase = np.concatenate([x["target"] for x in outputs] , axis=0)
_UpperCamelCase = dict(enumerate(self.labels))
_UpperCamelCase = [[] for _ in range(out_label_ids.shape[0])]
_UpperCamelCase = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
_UpperCamelCase = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_),
"precision": precision_score(lowercase_ , lowercase_),
"recall": recall_score(lowercase_ , lowercase_),
"f1": fa_score(lowercase_ , lowercase_),
}
_UpperCamelCase = dict(results.items())
_UpperCamelCase = results
return ret, preds_list, out_label_list
def __UpperCAmelCase ( self : int , lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._eval_end(lowercase_)
_UpperCamelCase = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._eval_end(lowercase_)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
_UpperCamelCase = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCAmelCase ( lowercase_ : List[str] , lowercase_ : int) -> Any:
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_)
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)")
parser.add_argument(
"--max_seq_length" , default=128 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets")
return parser
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCamelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = NERTransformer(args)
lowerCamelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCamelCase__ = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
lowerCamelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-celebahq-256"
_UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
def lowerCAmelCase__ ( a__ = 50 ) ->int:
'''simple docstring'''
_UpperCamelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result; we just want to make sure
        # it works, i.e. that the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
        # Passing input without a mask token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
        # If we use the most probable targets and filter differently, we should still
        # have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so the pipeline can't return more
        # unique results than there are distinct targets
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 1 |
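The tests above exercise the transformers fill-mask pipeline: `top_k` caps the number of candidates returned per mask, `targets` restricts scoring to chosen vocabulary strings, and multiple mask tokens (or a list input) yield nested result lists. A minimal usage sketch; the checkpoint name is an assumed example, any fill-mask model works:

```python
# Fill-mask usage sketch; "distilroberta-base" is an assumed example checkpoint.
from transformers import pipeline

fill_masker = pipeline("fill-mask", model="distilroberta-base")
mask = fill_masker.tokenizer.mask_token

# top_k caps the number of candidates returned for the mask.
print(fill_masker(f"Paris is the {mask} of France.", top_k=2))

# targets restricts scoring to the given strings (subword handling is model-specific).
print(fill_masker(f"Paris is the {mask} of France.", targets=[" capital"]))
```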
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = 42
__A = 42
def __init__( self : List[Any] , lowercase_ : UNetaDModel , lowercase_ : KarrasVeScheduler) -> List[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_)
@torch.no_grad()
def __call__( self : int , lowercase_ : int = 1 , lowercase_ : int = 50 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , **lowercase_ : Dict , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_UpperCamelCase = self.unet.config.sample_size
_UpperCamelCase = (batch_size, 3, img_size, img_size)
_UpperCamelCase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_UpperCamelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowercase_)
for t in self.progress_bar(self.scheduler.timesteps):
# here sigma_t == t_i from the paper
_UpperCamelCase = self.scheduler.schedule[t]
_UpperCamelCase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_UpperCamelCase , _UpperCamelCase = self.scheduler.add_noise_to_input(lowercase_ , lowercase_ , generator=lowercase_)
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_UpperCamelCase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_UpperCamelCase = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_)
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_UpperCamelCase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2).sample
_UpperCamelCase = self.scheduler.step_correct(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , step_output.prev_sample , step_output["derivative"] , )
_UpperCamelCase = step_output.prev_sample
_UpperCamelCase = (sample / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(lowercase_)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_)
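The `__call__` above implements the stochastic sampler of Karras et al. (2022): raise the noise level to `sigma_hat`, add matching fresh noise, take an Euler step toward `sigma_prev`, then apply a second-order correction when `sigma_prev != 0`. A framework-free NumPy sketch of one such step, where `denoise(x, sigma)` is an assumed stand-in for the trained denoiser:

```python
# One step of the Karras et al. (2022) stochastic sampler, as a NumPy sketch.
# `denoise(x, sigma)` is an assumed stand-in for the trained denoiser D(x; sigma).
import numpy as np

def karras_step(x, sigma, sigma_prev, denoise, gamma=0.1, rng=None):
    rng = rng or np.random.default_rng()
    # 1.-2. Temporarily raise the noise level and add matching fresh noise.
    sigma_hat = sigma * (1 + gamma)
    x_hat = x + np.sqrt(sigma_hat**2 - sigma**2) * rng.standard_normal(x.shape)
    # 3.-4. Evaluate dx/dsigma at sigma_hat.
    d = (x_hat - denoise(x_hat, sigma_hat)) / sigma_hat
    # 5. Euler step from sigma_hat down to sigma_prev.
    x_prev = x_hat + (sigma_prev - sigma_hat) * d
    if sigma_prev != 0:
        # 6. Second-order correction using the derivative at sigma_prev.
        d_prev = (x_prev - denoise(x_prev, sigma_prev)) / sigma_prev
        x_prev = x_hat + (sigma_prev - sigma_hat) * 0.5 * (d + d_prev)
    return x_prev
```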
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 1 |
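The style-context snippet above is Dijkstra's two-stack algorithm for fully parenthesized infix expressions, with identifiers obfuscated by the dataset transformation. A readable equivalent as a sketch (the names are mine, not the original's):

```python
# Dijkstra's two-stack evaluation of a fully parenthesized infix expression.
# Works on single-digit operands, mirroring the snippet above.
import operator

def two_stack_eval(expression: str) -> int:
    ops = {"*": operator.mul, "/": operator.truediv, "+": operator.add, "-": operator.sub}
    operands, operators = [], []          # plain lists used as stacks
    for ch in expression:
        if ch.isdigit():
            operands.append(int(ch))      # RULE 1: push operands
        elif ch in ops:
            operators.append(ch)          # RULE 2: push operators
        elif ch == ")":
            op = operators.pop()          # RULE 4: apply the top operator
            right, left = operands.pop(), operands.pop()
            operands.append(ops[op](left, right))
    return operands[-1]                   # RULE 5: the result remains on top

assert two_stack_eval("(5 + ((4 * 2) * (2 + 3)))") == 45
```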
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = multiprocessing.Manager()
_UpperCamelCase = manager.list()
_UpperCamelCase = multiprocessing.Process(target=a__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Tuple:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCamelCase = shutil.rmtree
_UpperCamelCase = os.rmdir
_UpperCamelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCamelCase = {}
with swallow_io():
with time_limit(a__ ):
exec(a__ , a__ )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(f'failed: {e}' )
# Needed for cleaning up.
_UpperCamelCase = rmtree
_UpperCamelCase = rmdir
_UpperCamelCase = chdir
@contextlib.contextmanager
def lowerCAmelCase__ ( a__ ) ->List[str]:
'''simple docstring'''
def signal_handler(a__ , a__ ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , a__ )
signal.signal(signal.SIGALRM , a__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(a__ ):
with contextlib.redirect_stderr(a__ ):
with redirect_stdin(a__ ):
yield
@contextlib.contextmanager
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(a__ ):
yield dirname
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
pass
class _UpperCAmelCase ( io.StringIO ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int]) -> Any:
"""simple docstring"""
raise OSError
def __UpperCAmelCase ( self : List[str] , *lowercase_ : List[Any] , **lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
raise OSError
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[Any] , **lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
raise OSError
def __UpperCAmelCase ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[str]) -> List[str]:
"""simple docstring"""
return False
class _UpperCAmelCase ( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
__A = '''stdin'''
@contextlib.contextmanager
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
if root == ".":
yield
return
_UpperCamelCase = os.getcwd()
os.chdir(a__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(a__ )
def lowerCAmelCase__ ( a__=None ) ->Dict:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_UpperCamelCase = None
_UpperCamelCase = None
import os
_UpperCamelCase = "1"
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
import shutil
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
import subprocess
_UpperCamelCase = None # type: ignore
_UpperCamelCase = None
import sys
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''bertabs'''
def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = max_pos
_UpperCamelCase = enc_layers
_UpperCamelCase = enc_hidden_size
_UpperCamelCase = enc_heads
_UpperCamelCase = enc_ff_size
_UpperCamelCase = enc_dropout
_UpperCamelCase = dec_layers
_UpperCamelCase = dec_hidden_size
_UpperCamelCase = dec_heads
_UpperCamelCase = dec_ff_size
_UpperCamelCase = dec_dropout
| 82 | 1 |
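The BertAbs configuration above follows the standard transformers recipe: subclass `PretrainedConfig`, declare a `model_type`, forward shared kwargs to `super().__init__`, and store each hyperparameter as an attribute. The pattern in isolation (class and field names are illustrative, not a real model):

```python
# Minimal PretrainedConfig subclass sketch; names are illustrative.
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size=64, num_layers=2, **kwargs):
        super().__init__(**kwargs)      # handles shared args like pad_token_id
        self.hidden_size = hidden_size
        self.num_layers = num_layers

config = ToyConfig(num_layers=4)
print(config.to_json_string())          # serializable like any HF config
```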
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase__ = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger "
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy").images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase_)
_UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = generator.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy").images
assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
def __UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
_UpperCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = "A painting of a squirrel eating a burger "
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy").images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
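The execution sandbox earlier in this row enforces its timeout with `signal.setitimer` plus a handler that raises a custom exception. A self-contained sketch of that pattern (Unix-only and main-thread-only, like the original):

```python
# SIGALRM-based time limit, a sketch of the pattern used in the sandbox above.
# Unix-only, and must run in the main thread.
import contextlib
import signal
import time

class TimeoutException(Exception):
    pass

@contextlib.contextmanager
def time_limit(seconds: float):
    def handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)   # always cancel the timer

try:
    with time_limit(0.1):
        time.sleep(1)
except TimeoutException:
    print("guarded code exceeded its budget")
```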
import sys
import turtle
def lowerCAmelCase__ ( a__ , a__ ) ->tuple[float, float]:
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , ) ->None:
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(a__ , get_mid(a__ , a__ ) , get_mid(a__ , a__ ) , depth - 1 )
triangle(a__ , get_mid(a__ , a__ ) , get_mid(a__ , a__ ) , depth - 1 )
triangle(a__ , get_mid(a__ , a__ ) , get_mid(a__ , a__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
lowerCamelCase__ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
lowerCamelCase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
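The turtle script above recurses into the three corner sub-triangles, so a drawing of depth d contains 3^d smallest triangles. A turtle-free sketch that simply enumerates those triangles as vertex tuples:

```python
# Enumerate Sierpinski sub-triangles without turtle; each item is 3 vertices.
def midpoint(p, q):
    return ((p[0] + q[0]) / 2, (p[1] + q[1]) / 2)

def sierpinski(a, b, c, depth):
    if depth == 0:
        yield (a, b, c)
        return
    ab, bc, ca = midpoint(a, b), midpoint(b, c), midpoint(c, a)
    yield from sierpinski(a, ab, ca, depth - 1)
    yield from sierpinski(ab, b, bc, depth - 1)
    yield from sierpinski(ca, bc, c, depth - 1)

tris = list(sierpinski((-175, -125), (0, 175), (175, -125), 3))
print(len(tris))   # 27 == 3**3
```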
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 82 | 1 |
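The DPR configuration above is shared by the context encoder, question encoder, and reader; `projection_dim=0` means the BERT pooled output is used directly as the embedding. A usage sketch pairing the two public encoders (checkpoint names are the released facebook/dpr-* models):

```python
# DPR usage sketch; the checkpoints are the public facebook/dpr-* releases.
from transformers import (
    DPRContextEncoder, DPRContextEncoderTokenizer,
    DPRQuestionEncoder, DPRQuestionEncoderTokenizer,
)

q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
ctx_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_enc = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

q_emb = q_enc(**q_tok("who wrote hamlet?", return_tensors="pt")).pooler_output
p_emb = ctx_enc(**ctx_tok("Hamlet was written by Shakespeare.", return_tensors="pt")).pooler_output
score = (q_emb @ p_emb.T).item()    # dot-product relevance score
print(score)
```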
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
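Both `__init__.py` modules above defer heavy imports through `_LazyModule`, so symbols are materialized only on first attribute access. Module-level `__getattr__` (PEP 562) gives the same effect without the helper; a sketch intended to live in a package `__init__.py`, with illustrative submodule names:

```python
# Lazy submodule imports via PEP 562 module-level __getattr__ (a sketch of what
# transformers' _LazyModule achieves; attribute/submodule names are illustrative).
import importlib

_LAZY_ATTRS = {
    "TableTransformerConfig": ".configuration_table_transformer",
    "TableTransformerModel": ".modeling_table_transformer",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)   # import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```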
from __future__ import annotations
lowerCamelCase__ = '''Muhammad Umer Farooq'''
lowerCamelCase__ = '''MIT'''
lowerCamelCase__ = '''1.0.0'''
lowerCamelCase__ = '''Muhammad Umer Farooq'''
lowerCamelCase__ = '''contact@muhammadumerfarooq.me'''
lowerCamelCase__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase_ : str) -> None:
"""simple docstring"""
super().__init__()
_UpperCamelCase = []
_UpperCamelCase = domain
def __UpperCAmelCase ( self : int , lowercase_ : str , lowercase_ : list[tuple[str, str | None]]) -> None:
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_UpperCamelCase = parse.urljoin(self.domain , lowercase_)
self.urls.append(lowercase_)
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return ".".join(get_sub_domain_name(a__ ).split("." )[-2:] )
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return parse.urlparse(a__ ).netloc
def lowerCAmelCase__ ( a__ = "https://github.com" ) ->list[str]:
'''simple docstring'''
_UpperCamelCase = get_domain_name(a__ )
# Initialize the parser
_UpperCamelCase = Parser(a__ )
try:
# Open URL
_UpperCamelCase = requests.get(a__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_UpperCamelCase = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_UpperCamelCase = requests.get(a__ )
# Get the valid email.
_UpperCamelCase = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(a__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(a__ )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url('''https://github.com''')
print(F"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
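The crawler above collects anchors with an `HTMLParser` subclass and greps each linked page with `[a-zA-Z0-9]+@<domain>`, which only matches plain alphanumeric local parts. A sketch of a slightly broader, still deliberately simplified pattern:

```python
# Email extraction sketch with a broader (still simplified) local-part pattern.
import re

def find_emails(text: str, domain: str) -> set[str]:
    # Allow dots, hyphens, plus signs, percent signs and underscores in the local part.
    pattern = r"[A-Za-z0-9._%+-]+@" + re.escape(domain)
    return set(re.findall(pattern, text))

print(find_emails("contact: jane.doe+hr@github.com, other@example.org", "github.com"))
```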
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__ ( a__ , a__ , a__ , a__=None ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = XLNetConfig.from_json_file(a__ )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(a__ )
else:
_UpperCamelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''xlm'''
__A = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self : str , lowercase_ : Optional[Any]=30145 , lowercase_ : List[str]=2048 , lowercase_ : Dict=12 , lowercase_ : List[Any]=16 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=True , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False , lowercase_ : Any=False , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=512 , lowercase_ : List[str]=2048**-0.5 , lowercase_ : Optional[Any]=1e-1_2 , lowercase_ : List[str]=0.02 , lowercase_ : List[Any]=0 , lowercase_ : int=1 , lowercase_ : List[str]=2 , lowercase_ : Optional[Any]=3 , lowercase_ : Any=5 , lowercase_ : str=True , lowercase_ : Optional[Any]="first" , lowercase_ : Dict=True , lowercase_ : int=None , lowercase_ : Any=True , lowercase_ : Optional[int]=0.1 , lowercase_ : str=5 , lowercase_ : List[str]=5 , lowercase_ : Dict=0 , lowercase_ : Optional[Any]=0 , lowercase_ : str=2 , lowercase_ : int=0 , **lowercase_ : Tuple , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = vocab_size
_UpperCamelCase = emb_dim
_UpperCamelCase = n_layers
_UpperCamelCase = n_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = use_lang_emb
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = bos_index
_UpperCamelCase = eos_index
_UpperCamelCase = pad_index
_UpperCamelCase = unk_index
_UpperCamelCase = mask_index
_UpperCamelCase = is_encoder
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = embed_init_std
_UpperCamelCase = init_std
_UpperCamelCase = summary_type
_UpperCamelCase = summary_use_proj
_UpperCamelCase = summary_activation
_UpperCamelCase = summary_proj_to_labels
_UpperCamelCase = summary_first_dropout
_UpperCamelCase = start_n_top
_UpperCamelCase = end_n_top
_UpperCamelCase = mask_token_id
_UpperCamelCase = lang_id
if "n_words" in kwargs:
_UpperCamelCase = kwargs["n_words"]
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , **lowercase_)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Union[str, Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCamelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple) -> int:
"""simple docstring"""
super().__init__()
_UpperCamelCase = model
_UpperCamelCase = 2
_UpperCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = LongformerModel.from_pretrained(a__ )
_UpperCamelCase = LightningModel(a__ )
_UpperCamelCase = torch.load(a__ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_UpperCamelCase = LongformerForQuestionAnswering.from_pretrained(a__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__ )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''decision_transformer'''
__A = ['''past_key_values''']
__A = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Dict , lowercase_ : Union[str, Any]=17 , lowercase_ : int=4 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=4096 , lowercase_ : Dict=True , lowercase_ : Any=1 , lowercase_ : Optional[int]=1024 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=1 , lowercase_ : List[str]=None , lowercase_ : Any="relu" , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : int=True , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=50256 , lowercase_ : List[Any]=50256 , lowercase_ : Tuple=False , lowercase_ : Any=False , **lowercase_ : Union[str, Any] , ) -> int:
"""simple docstring"""
_UpperCamelCase = state_dim
_UpperCamelCase = act_dim
_UpperCamelCase = hidden_size
_UpperCamelCase = max_ep_len
_UpperCamelCase = action_tanh
_UpperCamelCase = vocab_size
_UpperCamelCase = n_positions
_UpperCamelCase = n_layer
_UpperCamelCase = n_head
_UpperCamelCase = n_inner
_UpperCamelCase = activation_function
_UpperCamelCase = resid_pdrop
_UpperCamelCase = embd_pdrop
_UpperCamelCase = attn_pdrop
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_range
_UpperCamelCase = scale_attn_weights
_UpperCamelCase = use_cache
_UpperCamelCase = scale_attn_by_inverse_layer_idx
_UpperCamelCase = reorder_and_upcast_attn
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 82 | 1 |
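The class above is the standard transformers deprecation shim: subclass the replacement, emit a warning at construction, and forward all arguments unchanged. The pattern stripped to its essentials (class names are illustrative):

```python
# Generic deprecation-shim sketch: OldProcessor keeps working but warns.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=384)   # still works, with a FutureWarning
print(proc.size)
```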
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : str) -> Dict:
"""simple docstring"""
_UpperCamelCase = parent
def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return {}
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
_UpperCamelCase = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bsa
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = MarkupLMFeatureExtractor if is_bsa_available() else None
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = MarkupLMFeatureExtractionTester(self)
@property
def __UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class()
# Test not batched input
_UpperCamelCase = get_html_strings()[0]
_UpperCamelCase = feature_extractor(lowercase_)
# fmt: off
_UpperCamelCase = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
_UpperCamelCase = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , lowercase_)
self.assertEqual(encoding.xpaths , lowercase_)
# Test batched
_UpperCamelCase = get_html_strings()
_UpperCamelCase = feature_extractor(lowercase_)
# fmt: off
_UpperCamelCase = expected_nodes + [["My First Heading", "My first paragraph."]]
_UpperCamelCase = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes) , 2)
self.assertEqual(len(encoding.xpaths) , 2)
self.assertEqual(encoding.nodes , lowercase_)
self.assertEqual(encoding.xpaths , lowercase_)
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
raise ValueError(
                f' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.')
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''translation'''
def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"increasing your max_length manually, e.g. translator('...', max_length=400)")
return True
def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
if src_lang is not None:
_UpperCamelCase = src_lang
if tgt_lang is not None:
_UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCamelCase = kwargs.get("task" , self.task)
_UpperCamelCase = task.split("_")
if task and len(lowercase_) == 4:
# translation, XX, to YY
_UpperCamelCase = items[1]
_UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
| 82 | 1 |
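`TranslationPipeline._sanitize_parameters` above recovers `src_lang` and `tgt_lang` from task strings of the form `translation_xx_to_yy` when they are not passed directly. A usage sketch; the checkpoint is an assumed example:

```python
# Translation pipeline usage sketch; "t5-small" is an assumed example checkpoint.
from transformers import pipeline

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?", max_length=40))
# For multilingual models, the languages can be passed explicitly instead:
# translator("...", src_lang="en_XX", tgt_lang="fr_XX")
```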
from collections.abc import Sequence
def lowerCAmelCase__ ( a__ , a__ ) ->float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def lowerCAmelCase__ ( a__ , a__ ) ->float:
'''simple docstring'''
_UpperCamelCase = 0.0
for coeff in reversed(a__ ):
_UpperCamelCase = result * x + coeff
return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
_UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_UpperCamelCase = legacy
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = extra_ids
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = text.startswith(lowercase_)
if is_first:
_UpperCamelCase = text[1:]
_UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
_UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if token.startswith("<extra_id_"):
_UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
_UpperCamelCase = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
else:
_UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 82 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''longformer'''
def __init__( self : Dict , lowercase_ : Union[List[int], int] = 512 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 0 , lowercase_ : int = 2 , lowercase_ : int = 30522 , lowercase_ : int = 768 , lowercase_ : int = 12 , lowercase_ : int = 12 , lowercase_ : int = 3072 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 512 , lowercase_ : int = 2 , lowercase_ : float = 0.02 , lowercase_ : float = 1e-1_2 , lowercase_ : bool = False , **lowercase_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = attention_window
_UpperCamelCase = sep_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = onnx_export
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , lowercase_ : "PretrainedConfig" , lowercase_ : str = "default" , lowercase_ : "List[PatchingSpec]" = None) -> Any:
"""simple docstring"""
super().__init__(lowercase_ , lowercase_ , lowercase_)
_UpperCamelCase = True
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCamelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
])
@property
def __UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_UpperCamelCase = super().outputs
if self.task == "default":
_UpperCamelCase = {0: "batch"}
return outputs
@property
def __UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 1e-4
@property
def __UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
return max(super().default_onnx_opset , 14)
def __UpperCAmelCase ( self : Dict , lowercase_ : "PreTrainedTokenizerBase" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().generate_dummy_inputs(
preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_)
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
_UpperCamelCase = torch.zeros_like(inputs["input_ids"])
# make every second token global
_UpperCamelCase = 1
return inputs
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
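    # Apply the operation to the container and capture either the result or the
    # raised exception, so the HashMap and dict behaviors can be compared pairwise.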
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
def is_public(a__ ) -> bool:
return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
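    # `>` on sets is a strict-superset check: every public attribute of HashMap
    # must also exist on the built-in dict.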
assert dict_public_names > hash_public_names
| 82 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[int]=13 , lowercase_ : Tuple=7 , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Optional[int]=True , lowercase_ : Dict=True , lowercase_ : Tuple=99 , lowercase_ : str=64 , lowercase_ : str=32 , lowercase_ : Optional[Any]=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Optional[int]=16 , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Any=3 , lowercase_ : Tuple=4 , lowercase_ : Union[str, Any]=None , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = embedding_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def __UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : List[str] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = MobileBertModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
_UpperCamelCase = model(lowercase_ , token_type_ids=lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def __UpperCAmelCase ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = MobileBertForMaskedLM(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = MobileBertForNextSentencePrediction(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def __UpperCAmelCase ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = MobileBertForPreTraining(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , next_sentence_label=lowercase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Any) -> Tuple:
"""simple docstring"""
_UpperCamelCase = MobileBertForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = MobileBertForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = MobileBertForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCAmelCase ( self : str , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.num_choices
_UpperCamelCase = MobileBertForMultipleChoice(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__A = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = True
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : str=False) -> int:
"""simple docstring"""
_UpperCamelCase = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
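            # Pretraining-style models additionally expect MLM labels and a next-sentence
            # label; supply zero tensors of the matching shapes.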
if model_class in get_values(lowercase_):
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_)
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
return inputs_dict
def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = MobileBertModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase_)
def __UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase_)
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase_)
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase_)
def __UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase_)
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
'''simple docstring'''
return torch.tensor(
a__ , dtype=torch.long , device=a__ , )
lowerCamelCase__ = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(lowercase_)
_UpperCamelCase = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
_UpperCamelCase = model(lowercase_)[0]
_UpperCamelCase = torch.Size((1, 9, 512))
self.assertEqual(output.shape , lowercase_)
_UpperCamelCase = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=lowercase_ , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # difference of ~1, so it is not a good idea to measure closeness with an absolute tolerance.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
_UpperCamelCase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
_UpperCamelCase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound)
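        # A standalone sketch (not executed by this test) of why the ratio check is used:
        # with magnitudes near 1e8, even a tiny relative error dwarfs any fixed absolute
        # tolerance, while the ratio stays near 1.
        #   expected, got = 1.0e8, 1.0e8 * (1 + 1e-5)      # absolute gap of ~1e3
        #   abs(expected - got) < 1e-3                     # -> False
        #   1 - 1e-3 < expected / got < 1 + 1e-3           # -> True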
| 82 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->float:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->float:
'''simple docstring'''
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
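# A worked example of the relation these helpers rearrange, PV = nRT with
# R = 0.0821 L*atm/(mol*K): for 2 mol at 300 K in 10 L, the pressure form gives
# round((2 * 0.0821 * 300) / 10) = round(4.926) = 5 atm.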
| 82 | def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and number_of_steps > 0
), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
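    # Recurrence: ways(n) = ways(n - 1) + ways(n - 2), since every climb ends with a
    # 1-step or a 2-step. This is the Fibonacci sequence, computed iteratively below
    # in O(n) time and O(1) space.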
_UpperCamelCase , _UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''open-llama'''
def __init__( self : str , lowercase_ : Optional[int]=100000 , lowercase_ : List[str]=4096 , lowercase_ : Tuple=11008 , lowercase_ : Union[str, Any]=32 , lowercase_ : str=32 , lowercase_ : str="silu" , lowercase_ : int=2048 , lowercase_ : str=0.02 , lowercase_ : Dict=1e-6 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=0 , lowercase_ : int=1 , lowercase_ : int=2 , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Dict=None , **lowercase_ : Optional[int] , ) -> str:
"""simple docstring"""
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = intermediate_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = initializer_range
_UpperCamelCase = rms_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = kwargs.pop(
"use_memorry_efficient_attention" , lowercase_)
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_dropout_prob
_UpperCamelCase = use_stable_embedding
_UpperCamelCase = shared_input_output_embedding
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'got {self.rope_scaling}')
_UpperCamelCase = self.rope_scaling.get("type" , lowercase_)
_UpperCamelCase = self.rope_scaling.get("factor" , lowercase_)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
| 82 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
super().__init__(**lowercase_)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , "vision")
self.check_model_type(lowercase_)
def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCamelCase = kwargs.pop("text_queries")
if isinstance(lowercase_ , (str, Image.Image)):
_UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
else:
_UpperCamelCase = image
_UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
return results
def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {}
if "threshold" in kwargs:
_UpperCamelCase = kwargs["threshold"]
if "top_k" in kwargs:
_UpperCamelCase = kwargs["top_k"]
return {}, {}, postprocess_params
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
"""simple docstring"""
_UpperCamelCase = load_image(inputs["image"])
_UpperCamelCase = inputs["candidate_labels"]
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = candidate_labels.split(",")
_UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
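        # target_size carries the original (height, width) so predicted boxes can be
        # rescaled back to image coordinates during post-processing.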
for i, candidate_label in enumerate(lowercase_):
_UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
_UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
yield {
"is_last": i == len(lowercase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = model_inputs.pop("target_size")
_UpperCamelCase = model_inputs.pop("candidate_label")
_UpperCamelCase = model_inputs.pop("is_last")
_UpperCamelCase = self.model(**lowercase_)
_UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
for model_output in model_outputs:
_UpperCamelCase = model_output["candidate_label"]
_UpperCamelCase = BaseModelOutput(lowercase_)
_UpperCamelCase = self.image_processor.post_process_object_detection(
outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
_UpperCamelCase = outputs["scores"][index].item()
_UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
_UpperCamelCase = {"score": score, "label": label, "box": box}
results.append(lowercase_)
_UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
if top_k:
_UpperCamelCase = results[:top_k]
return results
def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
_UpperCamelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 82 | 1 |
from __future__ import annotations
from collections import deque
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : list[str]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []})
for keyword in keywords:
self.add_keyword(lowercase_)
self.set_fail_transitions()
def __UpperCAmelCase ( self : Tuple , lowercase_ : int , lowercase_ : str) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str) -> None:
"""simple docstring"""
_UpperCamelCase = 0
for character in keyword:
_UpperCamelCase = self.find_next_state(lowercase_ , lowercase_)
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
})
self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
_UpperCamelCase = len(self.adlist) - 1
else:
_UpperCamelCase = next_state
self.adlist[current_state]["output"].append(lowercase_)
def __UpperCAmelCase ( self : Tuple) -> None:
"""simple docstring"""
_UpperCamelCase = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowercase_)
_UpperCamelCase = 0
while q:
_UpperCamelCase = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowercase_)
_UpperCamelCase = self.adlist[r]["fail_state"]
while (
self.find_next_state(lowercase_ , self.adlist[child]["value"]) is None
and state != 0
):
_UpperCamelCase = self.adlist[state]["fail_state"]
_UpperCamelCase = self.find_next_state(
lowercase_ , self.adlist[child]["value"])
if self.adlist[child]["fail_state"] is None:
_UpperCamelCase = 0
_UpperCamelCase = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def __UpperCAmelCase ( self : int , lowercase_ : str) -> dict[str, list[int]]:
"""simple docstring"""
_UpperCamelCase = {} # returns a dict with keywords and list of its occurrences
_UpperCamelCase = 0
for i in range(len(lowercase_)):
while (
self.find_next_state(lowercase_ , string[i]) is None
and current_state != 0
):
_UpperCamelCase = self.adlist[current_state]["fail_state"]
_UpperCamelCase = self.find_next_state(lowercase_ , string[i])
if next_state is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
_UpperCamelCase = []
result[key].append(i - len(lowercase_) + 1)
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
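# A minimal usage sketch with hypothetical, de-obfuscated names (the class and its
# methods are renamed by the obfuscation above):
#   automaton = AhoCorasick(["he", "she", "his", "hers"])
#   automaton.search_in("ushers")   # -> {"she": [1], "he": [2], "hers": [2]}
# Overlapping matches are found in a single pass because keyword outputs are merged
# along the fail links.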
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
_UpperCamelCase = input_file.read()
_UpperCamelCase = regexp.search(lowercase_)
return match
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
_UpperCamelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase = regexp.finditer(lowercase_)
_UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowercase_)):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(lowercase_)):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
def lowerCAmelCase__ ( a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = ""
for word_or_phrase in separated:
if not isinstance(a__ , a__ ):
raise Exception("join() accepts only strings to be joined" )
joined += word_or_phrase + separator
return joined.strip(a__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
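# e.g. joining ("a", "b", "c") with separator "-" builds "a-b-c-" inside the loop,
# and the final strip(separator) removes the trailing separator, yielding "a-b-c".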
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
                    f'User input added while unprocessed input already exists: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
                    f'User input added while unprocessed input already exists: "{self.new_user_input}"; the new input was '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input.')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
_UpperCamelCase = self.tokenizer.eos_token
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
if min_length_for_response is not None:
_UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCamelCase = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method")
if hasattr(self.tokenizer , "_build_conversation_input_ids"):
_UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
_UpperCamelCase = torch.LongTensor([input_ids])
elif self.framework == "tf":
_UpperCamelCase = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
_UpperCamelCase = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_UpperCamelCase = max_length - minimum_tokens
_UpperCamelCase = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
_UpperCamelCase = model_inputs.pop("conversation")
_UpperCamelCase = max_length
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
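        # Encoder-decoder models return only the newly generated tokens (position 0 is
        # the decoder start token), while decoder-only models echo the prompt, so the
        # first n input tokens are sliced off below.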
if self.model.config.is_encoder_decoder:
_UpperCamelCase = 1
else:
_UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs["output_ids"]
_UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
_UpperCamelCase = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.eos_token_id
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
_UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 82 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _UpperCAmelCase ( unittest.TestCase, lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_tool("text-classification")
self.tool.setup()
_UpperCamelCase = load_tool("text-classification" , remote=lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = self.tool("That's quite cool" , ["positive", "negative"])
self.assertEqual(lowercase_ , "positive")
def __UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.remote_tool("That's quite cool" , ["positive", "negative"])
self.assertEqual(lowercase_ , "positive")
def __UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.tool(text="That's quite cool" , labels=["positive", "negative"])
self.assertEqual(lowercase_ , "positive")
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"])
self.assertEqual(lowercase_ , "positive")
| 82 | def lowerCAmelCase__ ( a__ = 50 ) ->int:
'''simple docstring'''
_UpperCamelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=3 , lowercase_ : Any=4 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=7 , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : str=True , lowercase_ : Union[str, Any]=True , lowercase_ : List[str]=99 , lowercase_ : Union[str, Any]=36 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[int]=37 , lowercase_ : Any="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : int=0.1 , lowercase_ : Dict=512 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[Any]=6 , lowercase_ : Dict=6 , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=4 , lowercase_ : Dict=None , lowercase_ : str=1000 , ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = coordinate_size
_UpperCamelCase = shape_size
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCamelCase = text_seq_length
_UpperCamelCase = (image_size // patch_size) ** 2 + 1
_UpperCamelCase = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
_UpperCamelCase = bbox.numpy()
        # Ensure each bbox is legal, i.e. x0 <= x1 and y0 <= y1 (swap the coordinates otherwise)
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCamelCase = bbox[i, j, 3]
_UpperCamelCase = bbox[i, j, 1]
_UpperCamelCase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCamelCase = bbox[i, j, 2]
_UpperCamelCase = bbox[i, j, 0]
_UpperCamelCase = tmp_coordinate
_UpperCamelCase = tf.constant(lowercase_)
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
_UpperCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : str) -> Dict:
"""simple docstring"""
_UpperCamelCase = TFLayoutLMvaModel(config=lowercase_)
# text + image
_UpperCamelCase = model(lowercase_ , pixel_values=lowercase_ , training=lowercase_)
_UpperCamelCase = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , training=lowercase_ , )
_UpperCamelCase = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , training=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
_UpperCamelCase = model(lowercase_ , training=lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
_UpperCamelCase = model({"pixel_values": pixel_values} , training=lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def __UpperCAmelCase ( self : Any , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int , lowercase_ : List[str]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFLayoutLMvaForSequenceClassification(config=lowercase_)
_UpperCamelCase = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , training=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFLayoutLMvaForTokenClassification(config=lowercase_)
_UpperCamelCase = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , training=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = 2
_UpperCamelCase = TFLayoutLMvaForQuestionAnswering(config=lowercase_)
_UpperCamelCase = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , training=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : Dict) -> str:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__A = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : int=False) -> dict:
"""simple docstring"""
_UpperCamelCase = copy.deepcopy(lowercase_)
if model_class in get_values(lowercase_):
_UpperCamelCase = {
k: tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(lowercase_ , tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_):
                _UpperCamelCase = tf.ones(self.model_tester.batch_size , dtype=tf.int32)
elif model_class in get_values(lowercase_):
                _UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
                _UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
elif model_class in get_values(lowercase_):
                _UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
elif model_class in get_values(lowercase_):
                _UpperCamelCase = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32)
return inputs_dict
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = TFLayoutLMvaModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
if getattr(lowercase_ , "hf_compute_loss" , lowercase_):
# The number of elements in the loss should be the same as the number of elements in the label
_UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_)
_UpperCamelCase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase_)[0]
]
_UpperCamelCase = added_label.shape.as_list()[:1]
                # Test that the model correctly computes the loss with kwargs
_UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_)
_UpperCamelCase = prepared_for_class.pop("input_ids")
_UpperCamelCase = model(lowercase_ , **lowercase_)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that the model correctly computes the loss when we mask some positions
_UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_)
_UpperCamelCase = prepared_for_class.pop("input_ids")
if "labels" in prepared_for_class:
_UpperCamelCase = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
_UpperCamelCase = -100
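                        # -100 is the ignore index used by Hugging Face loss computations,
                        # so the positions masked above should drop out of the loss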
_UpperCamelCase = tf.convert_to_tensor(lowercase_)
_UpperCamelCase = model(lowercase_ , **lowercase_)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that the model correctly computes the loss with a dict
_UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_)
_UpperCamelCase = model(lowercase_)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that the model correctly computes the loss with a tuple
_UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_)
# Get keys that were added with the _prepare_for_class function
_UpperCamelCase = prepared_for_class.keys() - inputs_dict.keys()
_UpperCamelCase = inspect.signature(model.call).parameters
_UpperCamelCase = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
_UpperCamelCase = {0: "input_ids"}
for label_key in label_keys:
_UpperCamelCase = signature_names.index(lowercase_)
_UpperCamelCase = label_key
_UpperCamelCase = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
_UpperCamelCase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
_UpperCamelCase = prepared_for_class[value]
_UpperCamelCase = tuple(lowercase_)
# Send to model
_UpperCamelCase = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def __UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
        ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
        ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase = type
self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
        ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
        ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
        ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
@slow
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFLayoutLMvaModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def lowerCAmelCase__ ( ) ->Any:
'''simple docstring'''
_UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowercase_) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=lowercase_ , return_tensors="tf").pixel_values
_UpperCamelCase = tf.constant([[1, 2]])
_UpperCamelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0)
# forward pass
_UpperCamelCase = model(input_ids=lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , training=lowercase_)
# verify the logits
_UpperCamelCase = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , lowercase_)
_UpperCamelCase = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]])
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4))
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 1.5
_UpperCamelCase = int(factor * num_class_images )
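    # Over-fetching by the 1.5x factor leaves headroom for downloads that fail in the
    # loop below; this is an inference from the retry logic, not documented behavior.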
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCamelCase = client.query(text=a__ )
if len(a__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
_UpperCamelCase = int(factor * num_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fb, open(
        f'{class_data_dir}/images.txt' , "w" ) as fc:
while total < num_class_images:
_UpperCamelCase = class_images[count]
count += 1
try:
_UpperCamelCase = requests.get(images["url"] )
if img.status_code == 200:
_UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''masked_bert'''
def __init__( self : List[str] , lowercase_ : Union[str, Any]=30522 , lowercase_ : Union[str, Any]=768 , lowercase_ : str=12 , lowercase_ : List[str]=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=2 , lowercase_ : Any=0.02 , lowercase_ : Union[str, Any]=1e-1_2 , lowercase_ : Optional[Any]=0 , lowercase_ : Dict="topK" , lowercase_ : Dict="constant" , lowercase_ : List[Any]=0.0 , **lowercase_ : List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = pruning_method
_UpperCamelCase = mask_init
_UpperCamelCase = mask_scale
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 0.0
for i, j in zip(lowercase_ , lowercase_):
n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_) else 0.0
_UpperCamelCase = n_correct / len(lowercase_)
return {
"accuracy": accuracy,
}
| 82 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dandelin/vilt-b32-finetuned-vqa'''
__A = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
__A = '''image_qa'''
__A = AutoProcessor
__A = AutoModelForVisualQuestionAnswering
__A = ['''image''', '''text''']
__A = ['''text''']
def __init__( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(self , ["vision"])
super().__init__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Tuple , lowercase_ : "Image" , lowercase_ : str) -> Tuple:
"""simple docstring"""
return self.pre_processor(lowercase_ , lowercase_ , return_tensors="pt")
def __UpperCAmelCase ( self : List[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
with torch.no_grad():
return self.model(**lowercase_).logits
def __UpperCAmelCase ( self : Any , lowercase_ : Any) -> int:
"""simple docstring"""
_UpperCamelCase = outputs.argmax(-1).item()
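        # map the highest-scoring class index to its answer string via the config's label table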
        return self.model.config.id2label[idx]
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
for i in range(0 , len(a__ ) , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(0 , a__ , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_UpperCamelCase = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_UpperCamelCase = generate_example_dataset(
os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(a__ ) )
_UpperCamelCase = func(a__ , **a__ )
print("shuffling dataset" )
_UpperCamelCase = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(a__ ) )
_UpperCamelCase = func(
a__ , **a__ )
with open(a__ , "wb" ) as f:
f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 1 |
import os
import sys
import unittest
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase__ = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase__ = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_UpperCamelCase = get_test_to_tester_mapping(lowercase_)
_UpperCamelCase = get_test_to_tester_mapping(lowercase_)
_UpperCamelCase = {"BertModelTest": "BertModelTester"}
_UpperCamelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(lowercase_) , lowercase_)
self.assertEqual(get_test_info.to_json(lowercase_) , lowercase_)
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = get_model_to_test_mapping(lowercase_)
_UpperCamelCase = get_model_to_test_mapping(lowercase_)
_UpperCamelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
_UpperCamelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(lowercase_) , lowercase_)
self.assertEqual(get_test_info.to_json(lowercase_) , lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = get_model_to_tester_mapping(lowercase_)
_UpperCamelCase = get_model_to_tester_mapping(lowercase_)
_UpperCamelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
_UpperCamelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(lowercase_) , lowercase_)
self.assertEqual(get_test_info.to_json(lowercase_) , lowercase_)
| 82 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
        _UpperCamelCase = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-celebahq-256"
        _UpperCamelCase = UNet2DModel.from_pretrained(lowercase_)
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(a__ , a__ , a__=0 , a__=None ):
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(a__ , a__ ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(a__ )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=a__ )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=a__ )
return (new_height, new_width)
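# Worked example (illustrative numbers, not from any docs): a 480x640 input resized
# toward (384, 384) with keep_aspect_ratio=True and multiple=32 fits the height
# (scale 0.8, since |1 - 0.8| < |1 - 0.6|) and returns (384, 512), both multiples of 32.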
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''pixel_values''']
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = False , lowercase_ : int = 1 , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = size if size is not None else {"height": 384, "width": 384}
_UpperCamelCase = get_size_dict(lowercase_)
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : bool = False , lowercase_ : int = 1 , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
_UpperCamelCase = get_resize_output_image_size(
lowercase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowercase_)
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowercase_) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
_UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCAmelCase ( self : str , lowercase_ : int , lowercase_ : List[Tuple] = None) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(lowercase_):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(lowercase_)):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_)
_UpperCamelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
_UpperCamelCase = logits.argmax(dim=1)
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 82 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
        # We don't actually care about the result; we just want to make sure it runs,
        # i.e. that the float16 tensor was cast back to float32 for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
        # Inputs without a mask token are not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        _UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda x: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so we can't output more
        # candidates than the number of unique targets
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( a__ ) ->float:
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(a__ ) / len(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
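# Trace for "(5 + ((4 * 2) * (2 + 3)))": the first ")" reduces 4 * 2 = 8, the next
# reduces 2 + 3 = 5, the following ")" reduces 8 * 5 = 40, and the outer ")" yields
# 5 + 40 = 45, which RULE 5 returns.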
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''xlm-prophetnet'''
__A = ['''past_key_values''']
__A = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : Union[str, Any] , lowercase_ : Optional[float] = 0.1 , lowercase_ : Optional[Union[str, Callable]] = "gelu" , lowercase_ : Optional[int] = 30522 , lowercase_ : Optional[int] = 1024 , lowercase_ : Optional[int] = 4096 , lowercase_ : Optional[int] = 12 , lowercase_ : Optional[int] = 16 , lowercase_ : Optional[int] = 4096 , lowercase_ : Optional[int] = 12 , lowercase_ : Optional[int] = 16 , lowercase_ : Optional[float] = 0.1 , lowercase_ : Optional[float] = 0.1 , lowercase_ : Optional[int] = 512 , lowercase_ : Optional[float] = 0.02 , lowercase_ : Optional[bool] = True , lowercase_ : Optional[bool] = True , lowercase_ : Optional[int] = 0 , lowercase_ : Optional[int] = 2 , lowercase_ : Optional[int] = 32 , lowercase_ : Optional[int] = 128 , lowercase_ : Optional[bool] = False , lowercase_ : Optional[float] = 0.0 , lowercase_ : Optional[bool] = True , lowercase_ : Optional[int] = 0 , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[int] = 2 , **lowercase_ : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = num_encoder_layers
_UpperCamelCase = num_encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = num_decoder_layers
_UpperCamelCase = num_decoder_attention_heads
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = init_std # Normal(0, this parameter)
_UpperCamelCase = activation_function
# parameters for xlmprophetnet
_UpperCamelCase = ngram
_UpperCamelCase = num_buckets
_UpperCamelCase = relative_max_distance
_UpperCamelCase = disable_ngram_loss
_UpperCamelCase = eps
# 3 Types of Dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = dropout
_UpperCamelCase = use_cache
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
@property
def __UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`.")
| 82 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''bertabs'''
def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = max_pos
_UpperCamelCase = enc_layers
_UpperCamelCase = enc_hidden_size
_UpperCamelCase = enc_heads
_UpperCamelCase = enc_ff_size
_UpperCamelCase = enc_dropout
_UpperCamelCase = dec_layers
_UpperCamelCase = dec_hidden_size
_UpperCamelCase = dec_heads
_UpperCamelCase = dec_ff_size
_UpperCamelCase = dec_dropout
| 82 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__ ( a__ , a__ , a__ , a__=None ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = XLNetConfig.from_json_file(a__ )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(a__ )
else:
_UpperCamelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
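    # Example invocation (hypothetical script name and paths, added for
    # illustration); the flags match the argparse definition above:
    # python convert_xlnet_checkpoint.py \
    #     --tf_checkpoint_path ./xlnet_cased/xlnet_model.ckpt \
    #     --xlnet_config_file ./xlnet_cased/xlnet_config.json \
    #     --pytorch_dump_folder_path ./xlnet-pytorch \
    #     --finetuning_task sts-b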
| 82 | from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 82 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
lowerCamelCase__ = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, '''rb''') as fp:
lowerCamelCase__ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCamelCase__ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCamelCase__ = [0] * args.vocab_size
for k, v in counter.items():
lowerCamelCase__ = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
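    # Example invocation (hypothetical script name, added for illustration); the
    # defaults mirror the argparse definition above:
    # python token_counts.py \
    #     --data_file data/dump.bert-base-uncased.pickle \
    #     --token_counts_dump data/token_counts.bert-base-uncased.pickle \
    #     --vocab_size 30522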
| 82 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 82 | 1 |
import fire
from utils import calculate_rouge, save_json
def lowerCAmelCase__ ( a__ , a__ , a__=None , **a__ ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = [x.strip() for x in open(a__ ).readlines()]
_UpperCamelCase = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
_UpperCamelCase = calculate_rouge(a__ , a__ , **a__ )
if save_path is not None:
save_json(a__ , a__ , indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
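    # Example invocation (hypothetical file names, added for illustration); fire
    # maps positional and keyword arguments straight onto the function above:
    # python rouge_cli.py predictions.txt targets.txt --save_path rouge.json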
| 82 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 1 |
import re
import string
import numpy as np
import datasets
lowerCamelCase__ = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
lowerCamelCase__ = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
lowerCamelCase__ = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , reference_urls=[] , )
def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Dict=None , lowercase_ : Optional[Any]=False , lowercase_ : Any=False , lowercase_ : List[str]=False , ) -> int:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_UpperCamelCase = np.array([re.sub(lowercase_ , "" , lowercase_) for x in predictions])
_UpperCamelCase = np.array([re.sub(lowercase_ , "" , lowercase_) for x in references])
else:
_UpperCamelCase = np.asarray(lowercase_)
_UpperCamelCase = np.asarray(lowercase_)
if ignore_case:
_UpperCamelCase = np.char.lower(lowercase_)
_UpperCamelCase = np.char.lower(lowercase_)
if ignore_punctuation:
_UpperCamelCase = string.punctuation.maketrans("" , "" , string.punctuation)
_UpperCamelCase = np.char.translate(lowercase_ , table=lowercase_)
_UpperCamelCase = np.char.translate(lowercase_ , table=lowercase_)
if ignore_numbers:
_UpperCamelCase = string.digits.maketrans("" , "" , string.digits)
_UpperCamelCase = np.char.translate(lowercase_ , table=lowercase_)
_UpperCamelCase = np.char.translate(lowercase_ , table=lowercase_)
_UpperCamelCase = predictions == references
return {"exact_match": np.mean(lowercase_) * 100}
| 82 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__ ( a__ , a__ , a__ , a__=None ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = XLNetConfig.from_json_file(a__ )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(a__ )
else:
_UpperCamelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 | 1 |
import datasets
from .evaluate import evaluate
lowerCamelCase__ = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
lowerCamelCase__ = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
lowerCamelCase__ = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}),
},
}) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def __UpperCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
_UpperCamelCase = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
_UpperCamelCase = evaluate(dataset=lowercase_ , predictions=lowercase_)
return score
| 82 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple) -> int:
"""simple docstring"""
super().__init__()
_UpperCamelCase = model
_UpperCamelCase = 2
_UpperCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = LongformerModel.from_pretrained(a__ )
_UpperCamelCase = LightningModel(a__ )
_UpperCamelCase = torch.load(a__ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_UpperCamelCase = LongformerForQuestionAnswering.from_pretrained(a__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__ )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
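    # Example invocation (hypothetical checkpoint path, added for illustration):
    # python convert_longformer_qa.py \
    #     --longformer_model longformer-base-4096 \
    #     --longformer_question_answering_ckpt_path ./checkpoints/epoch=4.ckpt \
    #     --pytorch_dump_folder_path ./longformer-qa-pytorch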
| 82 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = ['''pixel_values''']
def __init__( self : List[Any] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : List[Any] , ) -> None:
"""simple docstring"""
super().__init__(**lowercase_)
_UpperCamelCase = size if size is not None else {"shortest_edge": 224}
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
_UpperCamelCase = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size")
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase = do_convert_rgb
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
_UpperCamelCase = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Any , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_)
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase = [convert_to_rgb(lowercase_) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowercase_) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
_UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
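# Hedged usage sketch (added commentary; method names here are obfuscated, so this
# mirrors the upstream CLIP-style image processor the class is derived from):
# import numpy as np
# processor = CLIPImageProcessor()          # assumed upstream equivalent
# out = processor(np.zeros((256, 256, 3), dtype=np.uint8), return_tensors="np")
# out["pixel_values"].shape                 # -> (1, 3, 224, 224): resize to
#                                           # shortest_edge=224, then 224x224 crop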
| 82 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 82 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''xlm-roberta-xl'''
def __init__( self : str , lowercase_ : Any=250880 , lowercase_ : Optional[int]=2560 , lowercase_ : List[str]=36 , lowercase_ : Dict=32 , lowercase_ : Any=10240 , lowercase_ : Optional[int]="gelu" , lowercase_ : str=0.1 , lowercase_ : str=0.1 , lowercase_ : int=514 , lowercase_ : Tuple=1 , lowercase_ : Any=0.02 , lowercase_ : Optional[int]=1e-0_5 , lowercase_ : Optional[int]=1 , lowercase_ : str=0 , lowercase_ : Any=2 , lowercase_ : List[Any]="absolute" , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=None , **lowercase_ : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = classifier_dropout
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : str) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCamelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 82 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
raise ValueError(
                f' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.')
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''translation'''
def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> int:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"increasing your max_length manually, e.g. translator('...', max_length=400)")
return True
def __UpperCAmelCase ( self : Tuple , *lowercase_ : Any , lowercase_ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Any=None , lowercase_ : Optional[Any]=None) -> List[str]:
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_)
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : Dict=None , lowercase_ : str=None , **lowercase_ : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = super()._sanitize_parameters(**lowercase_)
if src_lang is not None:
_UpperCamelCase = src_lang
if tgt_lang is not None:
_UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCamelCase = kwargs.get("task" , self.task)
_UpperCamelCase = task.split("_")
if task and len(lowercase_) == 4:
# translation, XX, to YY
_UpperCamelCase = items[1]
_UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : str) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
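# Hedged usage sketch (added commentary): with the default task naming scheme,
# a task id such as "translation_en_to_fr" splits on "_" into four items, so
# _sanitize_parameters above picks items[1] ("en") as src_lang and items[3]
# ("fr") as tgt_lang when neither language is passed explicitly, e.g.:
# translator = pipeline("translation_en_to_fr", model="t5-small")  # names assumed
# translator("How old are you?")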
| 82 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _UpperCAmelCase :
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
return None
class _UpperCAmelCase :
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple) -> Any:
"""simple docstring"""
return None
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ , "tf" , 12 , **lowercase_)
@require_torch
@slow
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ , "pt" , 12 , **lowercase_)
@require_torch
@slow
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
from transformers import BertModel
_UpperCamelCase = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t") as vocab_file:
vocab_file.write("\n".join(lowercase_))
vocab_file.flush()
_UpperCamelCase = BertTokenizerFast(vocab_file.name)
with TemporaryDirectory() as bert_save_dir:
_UpperCamelCase = BertModel(BertConfig(vocab_size=len(lowercase_)))
model.save_pretrained(lowercase_)
self._test_export(lowercase_ , "pt" , 12 , lowercase_)
@require_tf
@slow
def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_UpperCamelCase = self._test_export(lowercase_ , "tf" , 12 , **lowercase_)
_UpperCamelCase = quantize(Path(lowercase_))
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_UpperCamelCase = self._test_export(lowercase_ , "pt" , 12 , **lowercase_)
_UpperCamelCase = quantize(lowercase_)
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model")
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : str=None , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
_UpperCamelCase = Path(lowercase_).joinpath("model.onnx")
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_)
return path
except Exception as e:
self.fail(lowercase_)
@require_torch
@require_tokenizers
@slow
def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
from transformers import BertModel
_UpperCamelCase = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
_UpperCamelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
self._test_infer_dynamic_axis(lowercase_ , lowercase_ , "pt")
@require_tf
@require_tokenizers
@slow
def __UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
from transformers import TFBertModel
_UpperCamelCase = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
_UpperCamelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
self._test_infer_dynamic_axis(lowercase_ , lowercase_ , "tf")
def __UpperCAmelCase ( self : Any , lowercase_ : List[str] , lowercase_ : str , lowercase_ : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = FeatureExtractionPipeline(lowercase_ , lowercase_)
_UpperCamelCase = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = infer_shapes(lowercase_ , lowercase_)
# Assert all variables are present
self.assertEqual(len(lowercase_) , len(lowercase_))
self.assertTrue(all(var_name in shapes for var_name in variable_names))
self.assertSequenceEqual(variable_names[:3] , lowercase_)
self.assertSequenceEqual(variable_names[3:] , lowercase_)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"})
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"})
self.assertDictEqual(shapes["output_1"] , {0: "batch"})
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = ["input_ids", "attention_mask", "token_type_ids"]
_UpperCamelCase = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
_UpperCamelCase , _UpperCamelCase = ensure_valid_input(FuncContiguousArgs() , lowercase_ , lowercase_)
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase_) , 3)
# Should have exactly the same input names
self.assertEqual(set(lowercase_) , set(lowercase_))
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase_ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_UpperCamelCase , _UpperCamelCase = ensure_valid_input(FuncNonContiguousArgs() , lowercase_ , lowercase_)
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowercase_) , 1)
self.assertEqual(len(lowercase_) , 1)
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"])
self.assertEqual(ordered_input_names[0] , "input_ids")
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = generate_identified_filename(Path("/home/something/my_fake_model.onnx") , "-test")
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix())
| 82 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
_UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_UpperCamelCase = legacy
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = extra_ids
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
return super().tokenize(lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
if not self.legacy:
_UpperCamelCase = text.startswith(lowercase_)
if is_first:
_UpperCamelCase = text[1:]
_UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
_UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if token.startswith("<extra_id_"):
_UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
_UpperCamelCase = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
else:
_UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
_UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_) + token
_UpperCamelCase = True
_UpperCamelCase = []
else:
current_sub_tokens.append(lowercase_)
_UpperCamelCase = False
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 82 | 1 |
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_UpperCamelCase = grid[0]
for row_n in range(1 , len(a__ ) ):
_UpperCamelCase = grid[row_n]
_UpperCamelCase = fill_row(a__ , a__ )
_UpperCamelCase = grid[row_n]
return grid[-1][-1]
def lowerCAmelCase__ ( a__ , a__ ) ->list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(a__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
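# Hedged usage note (editor addition, not in the original module): the function
# above mutates `grid` in place and returns the minimum top-left to bottom-right
# path sum, moving only right or down. On the classic 3x3 example:
#     lowerCAmelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])  # -> 7  (1 + 3 + 1 + 1 + 1)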
| 82 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
    def is_public(name) -> bool:
        return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
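# Hedged note (editor addition): each scenario above is a stream of
# (operation, *args) tuples replayed against both a plain dict and the custom
# HashMap, asserting identical results and identical raised errors. Assuming this
# file sits next to the hash map implementation, it runs under plain `pytest`.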
| 82 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 82 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 1 |
from __future__ import annotations
lowerCamelCase__ = [True] * 100_0001
lowerCamelCase__ = 2
while i * i <= 100_0000:
if seive[i]:
for j in range(i * i, 100_0001, i):
lowerCamelCase__ = False
i += 1
def lowerCAmelCase__ ( a__ ) ->bool:
'''simple docstring'''
return seive[n]
def lowerCAmelCase__ ( a__ ) ->bool:
'''simple docstring'''
return any(digit in "02468" for digit in str(a__ ) )
def lowerCAmelCase__ ( a__ = 1_000_000 ) ->list[int]:
'''simple docstring'''
_UpperCamelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(a__ ) and not contains_an_even_digit(a__ ):
_UpperCamelCase = str(a__ )
_UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(a__ ) )]
if all(is_prime(a__ ) for i in list_nums ):
result.append(a__ )
return result
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
| 82 | def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and number_of_steps > 0
    ), f'number_of_steps needs to be a positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
_UpperCamelCase , _UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
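# Hedged usage note (editor addition): this is the climbing-stairs recurrence
# f(n) = f(n-1) + f(n-2) with step sizes 1 and 2, computed iteratively, e.g.
#     lowerCAmelCase__(3)  # -> 3  (1+1+1, 1+2, 2+1)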
| 82 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowerCAmelCase__ ( a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ) ->Union[str, Any]:
'''simple docstring'''
if attention_mask is None:
_UpperCamelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_UpperCamelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_UpperCamelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : int , lowercase_ : List[str]=13 , lowercase_ : Optional[int]=7 , lowercase_ : List[str]=True , lowercase_ : int=False , lowercase_ : List[str]=99 , lowercase_ : Any=16 , lowercase_ : Any=2 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=4 , lowercase_ : Any="gelu" , lowercase_ : str=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[str]=32 , lowercase_ : Optional[int]=2 , lowercase_ : List[str]=1 , lowercase_ : str=0 , lowercase_ : Optional[int]=0.02 , ) -> int:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = initializer_range
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCamelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
_UpperCamelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
_UpperCamelCase = shift_tokens_right(lowercase_ , 1 , 2)
_UpperCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
_UpperCamelCase = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_)
return config, inputs_dict
def __UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Tuple , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(lowercase_)
_UpperCamelCase = model.encode(inputs_dict["input_ids"])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_)
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
_UpperCamelCase = model.decode(lowercase_ , lowercase_)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}')
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(lowercase_)
_UpperCamelCase = model.encode(inputs_dict["input_ids"])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_)
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
_UpperCamelCase = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}')
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = 99
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_config_and_data()
_UpperCamelCase = FlaxBlenderbotSmallForConditionalGeneration(lowercase_)
_UpperCamelCase = lm_model(input_ids=lowercase_)
_UpperCamelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowercase_)
def __UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_UpperCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_UpperCamelCase = FlaxBlenderbotSmallForConditionalGeneration(lowercase_)
_UpperCamelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
_UpperCamelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
_UpperCamelCase = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_)
_UpperCamelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowercase_)
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCamelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
_UpperCamelCase = shift_tokens_right(lowercase_ , 1 , 2)
_UpperCamelCase = np.equal(lowercase_ , 1).astype(np.floataa).sum()
_UpperCamelCase = np.equal(lowercase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(lowercase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase, lowerCAmelCase ):
'''simple docstring'''
__A = True
__A = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__A = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = FlaxBlenderbotSmallModelTester(self)
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_)
_UpperCamelCase = model_class(lowercase_)
@jax.jit
def encode_jitted(lowercase_ : Union[str, Any] , lowercase_ : Tuple=None , **lowercase_ : Any):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_)
with self.subTest("JIT Enabled"):
_UpperCamelCase = encode_jitted(**lowercase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_))
for jitted_output, output in zip(lowercase_ , lowercase_):
self.assertEqual(jitted_output.shape , output.shape)
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = model_class(lowercase_)
_UpperCamelCase = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
_UpperCamelCase = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ : Any , lowercase_ : int , lowercase_ : List[str]):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest("JIT Enabled"):
_UpperCamelCase = decode_jitted(**lowercase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_))
for jitted_output, output in zip(lowercase_ , lowercase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCamelCase = np.ones((1, 1)) * model.config.eos_token_id
_UpperCamelCase = model(lowercase_)
self.assertIsNotNone(lowercase_)
| 82 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
super().__init__(**lowercase_)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , "vision")
self.check_model_type(lowercase_)
def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCamelCase = kwargs.pop("text_queries")
if isinstance(lowercase_ , (str, Image.Image)):
_UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
else:
_UpperCamelCase = image
_UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
return results
def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {}
if "threshold" in kwargs:
_UpperCamelCase = kwargs["threshold"]
if "top_k" in kwargs:
_UpperCamelCase = kwargs["top_k"]
return {}, {}, postprocess_params
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
"""simple docstring"""
_UpperCamelCase = load_image(inputs["image"])
_UpperCamelCase = inputs["candidate_labels"]
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = candidate_labels.split(",")
_UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(lowercase_):
_UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
_UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
yield {
"is_last": i == len(lowercase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = model_inputs.pop("target_size")
_UpperCamelCase = model_inputs.pop("candidate_label")
_UpperCamelCase = model_inputs.pop("is_last")
_UpperCamelCase = self.model(**lowercase_)
_UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
for model_output in model_outputs:
_UpperCamelCase = model_output["candidate_label"]
_UpperCamelCase = BaseModelOutput(lowercase_)
_UpperCamelCase = self.image_processor.post_process_object_detection(
outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
_UpperCamelCase = outputs["scores"][index].item()
_UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
_UpperCamelCase = {"score": score, "label": label, "box": box}
results.append(lowercase_)
_UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
if top_k:
_UpperCamelCase = results[:top_k]
return results
def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
_UpperCamelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
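# Hedged usage sketch (editor addition); the checkpoint name is illustrative, but
# the task string matches the pipeline implemented above:
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> a list of {"score", "label", "box"} dicts, sorted by descending score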
| 82 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = 9, 14 # noqa: F841
_UpperCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCamelCase = defaultdict(a__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_UpperCamelCase = mst(a__ )
_UpperCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_UpperCamelCase = tuple(answer[:2] )
_UpperCamelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
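# Hedged note (editor addition): the graph is undirected, so each expected edge is
# accepted in either orientation; Prim's algorithm may report (u, v, w) or (v, u, w).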
| 82 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
_UpperCamelCase = input_file.read()
_UpperCamelCase = regexp.search(lowercase_)
return match
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
"""simple docstring"""
with open(lowercase_ , encoding="utf-8") as input_file:
_UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
_UpperCamelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCamelCase = regexp.finditer(lowercase_)
_UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowercase_)):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
def __UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
_UpperCamelCase = Path("./datasets")
_UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
for dataset in dataset_files:
if self._no_print_statements(str(lowercase_)):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = '''▁'''
lowerCamelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
lowerCamelCase__ = {
'''google/pegasus-xsum''': 512,
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = PegasusTokenizer
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , lowercase_ : Any=None , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[int]="</s>" , lowercase_ : Dict="<unk>" , lowercase_ : str="<mask_2>" , lowercase_ : Optional[Any]="<mask_1>" , lowercase_ : Optional[int]=None , lowercase_ : Tuple=103 , **lowercase_ : Any , ) -> Any:
"""simple docstring"""
_UpperCamelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_):
raise TypeError(
f'additional_special_tokens should be of type {type(lowercase_)}, but is'
f' {type(lowercase_)}')
_UpperCamelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(lowercase_) , self.offset - 1)
]
if len(set(lowercase_)) != len(lowercase_):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
_UpperCamelCase = additional_special_tokens_extended
else:
_UpperCamelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset)]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = False if not self.vocab_file else True
def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}')
return [1 if x in all_special_ids else 0 for x in seq]
def __UpperCAmelCase ( self : int , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowercase_)
elif token_ids_a is None:
return self._special_token_mask(lowercase_) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Tuple=None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_):
copyfile(self.vocab_file , lowercase_)
return (out_vocab_file,)
| 82 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
_UpperCamelCase = self.tokenizer.eos_token
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
if min_length_for_response is not None:
_UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCamelCase = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method")
if hasattr(self.tokenizer , "_build_conversation_input_ids"):
_UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
_UpperCamelCase = torch.LongTensor([input_ids])
elif self.framework == "tf":
_UpperCamelCase = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
_UpperCamelCase = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_UpperCamelCase = max_length - minimum_tokens
_UpperCamelCase = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
_UpperCamelCase = model_inputs.pop("conversation")
_UpperCamelCase = max_length
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
if self.model.config.is_encoder_decoder:
_UpperCamelCase = 1
else:
_UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs["output_ids"]
_UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
_UpperCamelCase = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.eos_token_id
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
_UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
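# Hedged usage sketch (editor addition), assuming the public `transformers` API;
# the task name and checkpoint are standard, but not taken from this file:
#     from transformers import Conversation, pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("Going to the movies tonight, any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])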
| 82 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__ , a__=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCAmelCase__ ( a__ , a__ , a__=False ) ->List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCamelCase = ""
else:
_UpperCamelCase = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
_UpperCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCamelCase = in_proj_bias[: config.hidden_size]
_UpperCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCamelCase = in_proj_bias[-config.hidden_size :]
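# Hedged note (editor addition): timm stores attention as one fused qkv projection
# of shape (3 * hidden_size, hidden_size); the three hidden_size-wide slices above
# become the separate query/key/value tensors that the HF ViT state dict expects.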
def lowerCAmelCase__ ( a__ ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = dct.pop(a__ )
_UpperCamelCase = val
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( a__ , a__ , a__=True ) ->Dict:
'''simple docstring'''
_UpperCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_UpperCamelCase = 8
# set labels if required
if not base_model:
_UpperCamelCase = 1_000
_UpperCamelCase = "huggingface/label-files"
_UpperCamelCase = "imagenet-1k-id2label.json"
_UpperCamelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type="dataset" ) , "r" ) )
_UpperCamelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_UpperCamelCase = 384
_UpperCamelCase = 1_536
_UpperCamelCase = 12
_UpperCamelCase = 6
# load original model from torch hub
_UpperCamelCase = torch.hub.load("facebookresearch/dino:main" , a__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(a__ )
_UpperCamelCase = create_rename_keys(a__ , base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , a__ )
# load HuggingFace model
if base_model:
_UpperCamelCase = ViTModel(a__ , add_pooling_layer=a__ ).eval()
else:
_UpperCamelCase = ViTForImageClassification(a__ ).eval()
model.load_state_dict(a__ )
# Check outputs on an image, prepared by ViTImageProcessor
_UpperCamelCase = ViTImageProcessor()
_UpperCamelCase = image_processor(images=prepare_img() , return_tensors="pt" )
_UpperCamelCase = encoding["pixel_values"]
_UpperCamelCase = model(a__ )
if base_model:
_UpperCamelCase = original_model(a__ )
assert torch.allclose(a__ , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_UpperCamelCase = original_model(a__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(a__ , outputs.logits , atol=1e-3 )
Path(a__ ).mkdir(exist_ok=a__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(a__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
lowerCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 82 | def lowerCAmelCase__ ( a__ = 50 ) ->int:
'''simple docstring'''
_UpperCamelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = HashMap(initial_block_size=4 )
_UpperCamelCase = {}
for _, (fun, *args) in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
_UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
    def is_public(name) -> bool:
        return not name.startswith("_" )
_UpperCamelCase = {name for name in dir({} ) if is_public(a__ )}
_UpperCamelCase = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
| 82 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            # widen the request until the index returns enough hits
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fb, open(
        f'{class_data_dir}/images.txt' , "w" ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )  # verify the payload decodes as an image
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fb.write(images["url"] + "\n" )
                    fc.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 1 |
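The retrieval script above asks the index for 1.5x the images it needs and, whenever the index returns fewer hits than that, multiplies the request by 1.5 again until it either has enough or the ask exceeds 10,000. The loop below isolates that widening logic with a stubbed query function; `widen_until_enough` and the lambda are illustrative stand-ins, not part of clip_retrieval.

def widen_until_enough(query_fn, num_class_images: int, factor: float = 1.5) -> list:
    # ask for factor-times more than needed; widen the request until the
    # index returns enough hits or the ask exceeds 10,000
    num_images = int(factor * num_class_images)
    while True:
        results = query_fn(num_images)
        if len(results) >= factor * num_class_images or num_images > 1e4:
            return results
        num_images = int(factor * num_images)

# toy stand-in: the "index" returns at most 300 hits no matter the ask
hits = widen_until_enough(lambda n: list(range(min(n, 300))), num_class_images=200)
assert len(hits) == 300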
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class _UpperCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mvp'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                "The config can simply be saved and uploaded again to be fixed.")
| 82 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 82 | 1 |
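The metric's own docstring example is runnable as-is; a minimal sketch, assuming the datasets library and the hendrycks/math dependency are installed and the metric is published under the name "competition_math":

import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0} once "1/2" is canonicalized to \frac{1}{2}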
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , "image_mean"))
        self.assertTrue(hasattr(image_processing , "image_std"))
        self.assertTrue(hasattr(image_processing , "do_normalize"))
        self.assertTrue(hasattr(image_processing , "do_resize"))
        self.assertTrue(hasattr(image_processing , "size"))
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
        self.assertEqual(image_processor.size , {"height": 42, "width": 42})
    def test_call_pil( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
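The three test_call_* methods above all assert one shape contract: a single image encodes to (1, C, H, W) and a batch to (B, C, H, W). A hedged standalone check of that contract, assuming DPTImageProcessor's defaults resize exactly to the requested size:

import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray((np.random.rand(30, 30, 3) * 255).astype(np.uint8))

single = processor(image, return_tensors="np").pixel_values
batch = processor([image, image], return_tensors="np").pixel_values
assert single.shape == (1, 3, 18, 18)  # one image -> batch dim of 1
assert batch.shape == (2, 3, 18, 18)   # list input -> stacked batch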
| 82 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( a__ , a__ ) ->int:
'''simple docstring'''
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
for i in range(0 , len(a__ ) , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(a__ ):
_UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Dict:
'''simple docstring'''
with dataset.formatted_as(type=a__ ):
for i in range(0 , a__ , a__ ):
_UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_UpperCamelCase = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_UpperCamelCase = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_UpperCamelCase = generate_example_dataset(
os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(a__ ) )
_UpperCamelCase = func(a__ , **a__ )
print("shuffling dataset" )
_UpperCamelCase = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(a__ ) )
_UpperCamelCase = func(
a__ , **a__ )
with open(a__ , "wb" ) as f:
f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 1 |
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''bert-generation'''
    def __init__( self , vocab_size=50358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
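A hedged sketch of using the config above, assuming it is exported as BertGenerationConfig as in transformers; round-tripping through to_dict/from_dict is a quick way to confirm the constructor stores everything:

from transformers import BertGenerationConfig

config = BertGenerationConfig(num_hidden_layers=2, hidden_size=128)
restored = BertGenerationConfig.from_dict(config.to_dict())
assert restored.num_hidden_layers == 2
assert restored.hidden_size == 128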
| 700 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests ( unittest.TestCase ):
    '''simple docstring'''
    @property
    def dummy_uncond_unet( self ):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def test_inference( self ):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2 , generator=generator , output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests ( unittest.TestCase ):
    '''simple docstring'''
    def test_full_inference( self ):
        """simple docstring"""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20 , generator=generator , output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 0 |
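Both test classes above lean on the same reproducibility pattern: seed torch, run the pipeline, take a 3x3 corner slice of the numpy output, and compare it against hard-coded values within 1e-2. The skeleton of that pattern, with a random tensor standing in for the pipeline output (the stand-in and names are illustrative):

import numpy as np
import torch

torch.manual_seed(0)
image = torch.rand(1, 32, 32, 3).numpy()  # stand-in for pipe(...).images

image_slice = image[0, -3:, -3:, -1]      # 3x3 corner of the last channel
expected_slice = image_slice.copy()       # real tests hard-code these values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2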