"""Processor class for Pix2Struct."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2_048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Text-only path (non-VQA): tokenize and return immediately.
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values; in VQA mode the question is rendered onto the image as a header
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            # The text is fed to the decoder, so rename the tokenizer outputs accordingly.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
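

# A minimal, hedged usage sketch: the checkpoint name and the dummy image are
# illustrative assumptions, not part of this module.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))

    # Non-VQA: the image becomes flattened patches, the text becomes decoder inputs.
    inputs = processor(images=image, text="A caption.", return_tensors="pt")
    print(sorted(inputs.keys()))  # e.g. decoder_attention_mask, decoder_input_ids, flattened_patches, ...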
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
"""Tests for diffusers' UNet2DModel."""
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading uses accelerate (low_cpu_mem_usage=True)
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
"""simple docstring"""
import os
from pathlib import Path
def lowercase ():
from torch.utils.cpp_extension import load
__lowerCAmelCase = Path(_lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__lowerCAmelCase = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
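

# A hedged usage sketch: the first call JIT-compiles the extension with torch's
# cpp_extension (ninja and a CUDA toolchain must be available); later calls hit
# the build cache. The forward signature in the comment below is an assumption
# based on the reference Deformable DETR kernels, not something this loader defines.
if __name__ == "__main__":
    MSDA = load_cuda_kernels()
    # output = MSDA.ms_deform_attn_forward(
    #     value, value_spatial_shapes, value_level_start_index,
    #     sampling_locations, attention_weights, im2col_step,
    # )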
"""ImageGPT model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence (SOS) token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
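

# A small sanity sketch of the attribute_map aliasing above: the GPT-2-style
# names (n_embd, n_layer, ...) remain the stored attributes, while the standard
# transformers names resolve to them through the map.
if __name__ == "__main__":
    config = ImageGPTConfig()
    assert config.hidden_size == config.n_embd == 512
    assert config.num_hidden_layers == config.n_layer == 24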
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_a : Any = TypeVar('T')
class __A ( Generic[T] ):
_UpperCamelCase : deque[T] # Cache store of keys
_UpperCamelCase : set[T] # References of the keys in cache
_UpperCamelCase : int = 10 # Maximum capacity of cache
def __init__( self , a__ ):
_lowerCAmelCase : Any = deque()
_lowerCAmelCase : List[Any] = set()
if not n:
_lowerCAmelCase : List[str] = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
_lowerCAmelCase : List[Any] = n
def __A ( self , a__ ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_lowerCAmelCase : Dict = self.dq_store.pop()
self.key_reference.remove(a__ )
else:
self.dq_store.remove(a__ )
self.dq_store.appendleft(a__ )
self.key_reference.add(a__ )
def __A ( self ):
for k in self.dq_store:
print(a__ )
def __repr__( self ):
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def A__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , snake_case_ ) -> List[Any]:
__lowerCAmelCase = """this is a test"""
__lowerCAmelCase = """this is a test"""
return input_text, output_text
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = """<pad>"""
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(snake_case_ ) , 30_001 )
def A__ ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = Accelerator()
lowercase_ = (accelerator.state.process_index + 2, 1_0)
lowercase_ = torch.randint(0, 1_0, shape).to(accelerator.device)
lowercase_ = ""
lowercase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowercase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowercase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image processor configuration from a local folder or the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
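

# A minimal usage sketch (the checkpoint name is an illustrative assumption):
# AutoImageProcessor resolves the concrete class from the checkpoint's config.
if __name__ == "__main__":
    image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    print(type(image_processor).__name__)  # e.g. "ViTImageProcessor"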
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" REALM tokenizer (backed by HuggingFace's tokenizers library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate lists, padding every candidate to max_length so they can be stacked."""
        # Always pad to the fixed maximum length so candidates stack into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
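

# A minimal, hedged usage sketch of batch_encode_candidates: each candidate is
# padded to the same max_length so the per-question candidates stack into one
# batch. The checkpoint name is illustrative.
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    batch = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    encoded = tokenizer.batch_encode_candidates(batch, max_length=10, return_tensors="np")
    print(encoded["input_ids"].shape)  # (batch, num_candidates, max_length)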
"""
https://www.hackerrank.com/challenges/abbr/problem
You can perform the following operations on string a:

1. Capitalize zero or more of a's lowercase letters.
2. Delete all of the remaining lowercase letters in a.

Example: a = "daBcd", b = "ABC"
daBcd -> capitalize a and c (dABCd) -> remove d's (ABC)
"""


def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j] is True when a[:i] can be transformed into b[:j].
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
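
    # A short illustrative trace of the docstring example:
    # "daBcd" -> capitalize 'a' and 'c' -> "dABCd" -> delete the two 'd's -> "ABC"
    print(abbr("daBcd", "ABC"))  # True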
"""simple docstring"""
import math
def lowercase (_lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase (_lowerCAmelCase = 0.1 ):
__lowerCAmelCase = 3
__lowerCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
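
    # Sanity checks (illustrative) for the 6k +/- 1 trial division above:
    assert is_prime(2) and is_prime(3) and is_prime(29)
    assert not is_prime(1) and not is_prime(25) and not is_prime(49)  # 25 = 5 * 5, 49 = 7 * 7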
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
import os
from distutils.util import strtobool
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for e in env_keys:
__lowerCAmelCase = int(os.environ.get(_lowerCAmelCase , -1 ) )
if val >= 0:
return val
return default
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return strtobool(_lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowercase (_lowerCAmelCase , _lowerCAmelCase="no" ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return value
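# Usage sketch (my own example, not part of the source module): these helpers
# read configuration from environment variables with sane fallbacks.
#
#     os.environ["MY_APP_WORKERS"] = "4"
#     get_int_from_env(["MY_APP_WORKERS", "NUM_WORKERS"], default=1)  # -> 4
#     parse_flag_from_env("MY_APP_DEBUG")                             # -> False
#     parse_choice_from_env("MY_APP_MODE", default="no")              # -> "no"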
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowerCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
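# Note (my own annotation, not from the source): sum(phi(d)) for 2 <= d <= N
# counts the reduced proper fractions a/d with gcd(a, d) == 1, which is what
# Project Euler 72 asks for. Small check: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so
# solution(8) == 21, matching the worked example in the problem statement.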
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
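# Note (my own annotation): the 0.666 floor in run_and_check is a loose
# smoke-test threshold for the tiny bundled MRPC fixture, not a benchmark
# accuracy target for DeeBERT itself.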
"""simple docstring"""
from math import isqrt, loga
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def lowercase (_lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ):
__lowerCAmelCase = degree * loga(_lowerCAmelCase )
__lowerCAmelCase = int(_lowerCAmelCase )
__lowerCAmelCase = calculate_prime_numbers(_lowerCAmelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
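# Note (my own annotation): test_output_pretrained seeds both the CPU and CUDA
# RNGs before sampling, so the hard-coded expected slice stays reproducible no
# matter which device torch_device resolves to.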
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14,
        label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4,
        is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25, autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowercase (_lowerCAmelCase="train-batch.pt" ):
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCAmelCase , repo_type="""dataset""" )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.53612276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
"""simple docstring"""
from math import pi, sqrt
def lowercase (_lowerCAmelCase ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_lowerCAmelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_lowerCAmelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowercase ():
assert gamma(0.5 ) == sqrt(_lowerCAmelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_ = 1.0
while num:
SCREAMING_SNAKE_CASE_ = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
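# Worked check (my own note, not from the source): gamma(n) == (n - 1)! for
# positive integers, e.g. gamma(4) == 6, and gamma(1.5) == 0.5 * sqrt(pi)
# ~= 0.8862, since each recursion step multiplies by (num - 1) until it hits
# gamma(1) or gamma(0.5).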
"""Deprecated BeitFeatureExtractor shim kept for backwards compatibility."""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is set.
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments( TrainingArguments ):
'''simple docstring'''
    mp_parameters: str = field(
        default="""""" , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
    def __post_init__( self ):
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , FutureWarning , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            device = torch.device("""cpu""" )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("""cuda""" , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            device = torch.device("""cuda""" , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            device = torch.device("""cuda""" , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
@property
    def world_size( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device( self ):
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation( self ):
return False
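# Hedged usage sketch (only meaningful inside a SageMaker job, so kept
# commented out): device and world_size resolve through the overrides above.
# args = SageMakerTrainingArguments(output_dir="output")
# print(args.device, args.world_size, args.place_model_on_device)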
| 301
| 0
|
"""simple docstring"""
import requests
a__ : Dict = '''''' # <-- Put your OpenWeatherMap appid here!
a__ : Union[str, Any] = '''https://api.openweathermap.org/data/2.5/'''
def UpperCAmelCase__ (lowerCAmelCase_ = "Chicago" , lowerCAmelCase_ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def UpperCAmelCase__ (lowerCAmelCase_ = "Kolkata, India" , lowerCAmelCase_ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def UpperCAmelCase__ (lowerCAmelCase_ = 55.68 , lowerCAmelCase_ = 12.57 , lowerCAmelCase_ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
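# Note on the `params=locals()` trick above: the parameter names (q, appid,
# lat, lon) become the OpenWeatherMap query keys, e.g. the first helper issues
# GET .../weather?q=Chicago&appid=<APPID> and returns the parsed JSON payload.
# >>> current_weather("Chicago")  # requires a valid APPID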
| 54
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roberta_fast'''] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roberta'''] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_roberta'''] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roberta'''] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
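# Note on the pattern above: swapping the module in sys.modules for a
# _LazyModule means submodules listed in _import_structure are imported only
# on first attribute access, e.g. `from transformers import RobertaModel`
# triggers the real import of modeling_roberta at that point, keeping the
# top-level `import transformers` cheap.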
| 301
| 0
|
'''simple docstring'''
def solution():
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
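# Sanity note: the digits of Champernowne's constant at positions
# 1, 10, 100, 1_000, 10_000, 100_000 and 1_000_000 are 1, 1, 5, 3, 7, 2, 1,
# so solution() should return their product, 210 (Project Euler 40).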
| 55
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 301
| 0
|
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number ) -> int:
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
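# Usage sketch: prime_factors returns factors with multiplicity, so the
# parity test implements the Liouville function lambda(n) = (-1)**Omega(n):
# >>> liouville_lambda(6), liouville_lambda(30), liouville_lambda(4)
# (1, -1, 1)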
| 56
|
"""simple docstring"""
def solution(limit = 100_0000 ):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2 , limit ):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
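# Sanity note: the counters dict memoizes chain lengths so each start value
# is walked at most once; for the default limit of 100_0000 the longest
# Collatz chain is known to start at 837799 (Project Euler 14).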
| 301
| 0
|
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
class TFMT5Model( TFT5Model ):
    model_type = """mt5"""
    config_class = MT5Config
class TFMT5ForConditionalGeneration( TFT5ForConditionalGeneration ):
    model_type = """mt5"""
    config_class = MT5Config
class TFMT5EncoderModel( TFT5EncoderModel ):
    model_type = """mt5"""
    config_class = MT5Config
| 57
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1 , p2 ):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1 , vertex2 , vertex3 , depth , ):
    my_pen.up()
    my_pen.goto(vertex1[0] , vertex1[1] )
    my_pen.down()
    my_pen.goto(vertex2[0] , vertex2[1] )
    my_pen.goto(vertex3[0] , vertex3[1] )
    my_pen.goto(vertex1[0] , vertex1[1] )
    if depth == 0:
        return
    triangle(vertex1 , get_mid(vertex1 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
    triangle(vertex2 , get_mid(vertex1 , vertex2 ) , get_mid(vertex2 , vertex3 ) , depth - 1 )
    triangle(vertex3 , get_mid(vertex3 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
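# Depth note: each recursion level replaces a triangle with three half-size
# copies, so a call with depth d draws 3**d smallest triangles; e.g.
# `python fractals.py 5` traces 243 of them.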
| 301
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mgp_str"""] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58
|
"""simple docstring"""
def partition(m ):
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
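# Sanity values for the partition numbers p(n) computed by the memo table
# above (hand-checked for small n): partition(3) == 3 and partition(5) == 7,
# matching p(3) = 3 and p(5) = 7.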
| 301
| 0
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest( TestCase ):
    def _no_encoding_on_file_open(self , filepath: str ):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements(self , filepath: str ):
        r"""Find all print statements."""
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements(self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 59
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        wiki40b_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps ) # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    eli5 = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    eli5_train = eli5["""train_eli5"""]
    eli5_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        # positional order (question_doc, model, tokenizer) follows the local elia_utils helper
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device="""cuda:0""" , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_lxmert_fast'''] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lxmert'''] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_lxmert'''] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations(
    examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ):
    fout = Path(out_file ).open("""w""" , encoding="""utf-8""" )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + """\n""" )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time ) # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
    return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def run_generate(verbose=True ):
    parser = argparse.ArgumentParser()
    parser.add_argument("""model_name""" , type=str , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
    parser.add_argument("""input_path""" , type=str , help="""like cnn_dm/test.source""" )
    parser.add_argument("""save_path""" , type=str , help="""where to save summaries""" )
    parser.add_argument("""--reference_path""" , type=str , required=False , help="""like cnn_dm/test.target""" )
    parser.add_argument("""--score_path""" , type=str , required=False , default="""metrics.json""" , help="""where to save metrics""" )
    parser.add_argument("""--device""" , type=str , required=False , default=DEFAULT_DEVICE , help="""cuda, cuda:1, cpu etc.""" )
    parser.add_argument(
        """--prefix""" , type=str , required=False , default=None , help="""will be added to the begininng of src examples""" )
    parser.add_argument("""--task""" , type=str , default="""summarization""" , help="""used for task_specific_params + metrics""" )
    parser.add_argument("""--bs""" , type=int , default=8 , required=False , help="""batch size""" )
    parser.add_argument(
        """--n_obs""" , type=int , default=-1 , required=False , help="""How many observations. Defaults to all.""" )
    parser.add_argument("""--fp16""" , action="""store_true""" )
    parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
    parser.add_argument(
        """--info""" , nargs="""?""" , type=str , const=datetime_now() , help=(
            """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
            """ lang=en-ru. If no value is passed, the current datetime string will be used."""
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("""Can't mix --fp16 and --device cpu""" )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if """translation""" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["""info"""] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , """w""" ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
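# Example invocation for summarization (paths and model id are placeholders):
# python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
#     --reference_path cnn_dm/test.target --score_path rouge.json --bs 16 --fp16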
| 301
| 0
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_a = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        return {}, {}, {}
    def preprocess( self , lowercase_ ):
        image = load_image(lowercase_ )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , lowercase_ ):
        model_outputs = self.model(**lowercase_ )
        return model_outputs
    def postprocess( self , model_outputs ):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("uint8" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict
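# Hedged usage sketch (the model id is an example, not fixed by this file):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"].save("depth_map.png")  # PIL image; out["predicted_depth"] is the raw tensor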
| 61
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
    def test_post_process_semantic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # Disable gradient computation for every parameter of the module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil_image(pil_img):
    fig = plt.imshow(pil_img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
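# Quick usage sketch for the helpers above (the descriptive function names are
# chosen here, since the original identifiers were mangled in this dump):
if __name__ == "__main__":
    device = get_device()
    print(f"[{get_timestamp()}] running on {device}")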
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
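def _flax_auto_model_example():
    # Usage sketch (hedged; downloads a real checkpoint if executed): the auto
    # class resolves the checkpoint's config type through CONFIG_MAPPING_NAMES
    # and instantiates the matching class from FLAX_MODEL_MAPPING_NAMES, e.g.
    # "bert" -> FlaxBertModel. The checkpoint name is illustrative only.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    return model(**tokenizer("Hello world", return_tensors="np"))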
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local",
    )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
lowerCAmelCase_ : Any = parse_args()
main(args)
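# Worked example for count_samples (illustrative shard names, not real files):
# the regex r"-\d+-(\d+)\.tfrecord" extracts a per-shard sample count that is
# assumed to be embedded in each filename by the companion sharding script, so
# the pipeline can be sized without reading any records:
#
#     count_samples(["train-00000-1024.tfrecord", "train-00001-512.tfrecord"])
#     # -> 1536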
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
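# Worked example: for the n=4, k=2 demo above, generate_all_combinations
# yields every 2-combination of 1..4 in lexicographic order, so the script
# prints:
#
#     1 2
#     1 3
#     1 4
#     2 3
#     2 4
#     3 4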
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
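# Usage sketch (hedged): the loader JIT-compiles the extension on first use,
# which requires a CUDA toolchain, so callers typically guard the call:
#
#     try:
#         MSDA = load_cuda_kernels()
#     except Exception:
#         MSDA = None  # fall back to a pure-PyTorch implementation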
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = 10
UpperCAmelCase__ = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
UpperCAmelCase__ = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(__A ) ),
}, features=__A, )
return dataset
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=__A )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt"
UpperCAmelCase__ = FILE_CONTENT
with open(__A, "w" ) as f:
f.write(__A )
return filename
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
import bz2
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
UpperCAmelCase__ = bytes(__A, "utf-8" )
with bz2.open(__A, "wb" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
UpperCAmelCase__ = bytes(__A, "utf-8" )
with gzip.open(__A, "wb" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
UpperCAmelCase__ = bytes(__A, "utf-8" )
with lz4.frame.open(__A, "wb" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with py7zr.SevenZipFile(__A, "w" ) as archive:
archive.write(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> Dict:
'''simple docstring'''
import tarfile
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(__A, "w" ) as f:
f.add(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
import lzma
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
UpperCAmelCase__ = bytes(__A, "utf-8" )
with lzma.open(__A, "wb" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
'''simple docstring'''
import zipfile
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
UpperCAmelCase__ = bytes(__A, "utf-8" )
with zstd.open(__A, "wb" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "file.xml"
UpperCAmelCase__ = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(__A, "w" ) as f:
f.write(__A )
return filename
DATA = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = datasets.Dataset.from_dict(__A )
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlite3.connect(__A ) ) as con:
UpperCAmelCase__ = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(__A, "w", newline="" ) as f:
UpperCAmelCase__ = csv.DictWriter(__A, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(__A, "w", newline="" ) as f:
UpperCAmelCase__ = csv.DictWriter(__A, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> Dict:
'''simple docstring'''
import bz2
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(__A, "rb" ) as f:
UpperCAmelCase__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(__A, "wb" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename(__A ) )
f.write(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) )
f.write(__A, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.join("main_dir", os.path.basename(__A ) ) )
f.write(__A, arcname=os.path.join("main_dir", os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
UpperCAmelCase__ = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(__A, "wb" ) as f:
UpperCAmelCase__ = pq.ParquetWriter(__A, schema=__A )
UpperCAmelCase__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__A ) )] for k in DATA[0]}, schema=__A )
writer.write_table(__A )
writer.close()
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
UpperCAmelCase__ = {"data": DATA}
with open(__A, "w" ) as f:
json.dump(__A, __A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
UpperCAmelCase__ = {"data": DATA_DICT_OF_LISTS}
with open(__A, "w" ) as f:
json.dump(__A, __A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(__A, "w" ) as f:
for item in DATA:
f.write(json.dumps(__A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(__A, "w" ) as f:
for item in DATA:
f.write(json.dumps(__A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> int:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(__A, "w" ) as f:
for item in DATA_312:
f.write(json.dumps(__A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(__A, "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(__A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> List[str]:
'''simple docstring'''
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(__A, "rb" ) as orig_file:
with gzip.open(__A, "wb" ) as zipped_file:
zipped_file.writelines(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
'''simple docstring'''
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(__A, "rb" ) as orig_file:
with gzip.open(__A, "wb" ) as zipped_file:
zipped_file.writelines(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename(__A ) )
f.write(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A, __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.join("nested", os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.join("main_dir", os.path.basename(__A ) ) )
f.write(__A, arcname=os.path.join("main_dir", os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(__A, "w" ) as f:
f.add(__A, arcname=os.path.basename(__A ) )
f.add(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A, __A ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(__A, "w" ) as f:
f.add(__A, arcname=os.path.join("nested", os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> int:
'''simple docstring'''
UpperCAmelCase__ = ["0", "1", "2", "3"]
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(__A, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = ["0", "1", "2", "3"]
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(__A, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = ["0", "1", "2", "3"]
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(__A, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> int:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename(__A ) )
f.write(__A, arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.join("main_dir", os.path.basename(__A ) ) )
f.write(__A, arcname=os.path.join("main_dir", os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A, __A ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename("unsupported.ext" ) )
f.write(__A, arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
UpperCAmelCase__ = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(__A, "w", encoding="utf-8" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
return os.path.join("tests", "features", "data", "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
return os.path.join("tests", "features", "data", "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(__A, "w" ) as f:
f.write(__A, arcname=os.path.basename(__A ) )
f.write(__A, arcname=os.path.basename(__A ).replace(".jpg", "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
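# Usage sketch (hedged): pytest injects these session-scoped fixtures by
# function name, so a test elsewhere in the suite consumes them as parameters.
# The fixture name below is illustrative only, since the original fixture
# names were mangled in this dump:
#
#     def test_csv_roundtrip(csv_path):
#         ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#         assert ds.num_rows == 4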
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes remain uncompleted, a process whose arrival time has
    # passed and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
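def _stopping_criteria_example():
    # Usage sketch: criteria are combined in a StoppingCriteriaList, and
    # generation stops as soon as any single criterion fires. The tensors
    # below are dummies standing in for a real generation loop.
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
    input_ids = torch.ones((1, 25), dtype=torch.long)
    scores = torch.zeros((1, 100))
    return criteria(input_ids, scores)  # True, since 25 >= max_length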
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4_398, 25, 21, 1_289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
def A__ ( self ) -> Optional[int]:
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("""sequence builders""" )
        text_a = tokenizer.encode("""multi-sequence build""" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 301
| 0
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
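# A quick illustration (editorial, with made-up rows) of what this custom format
# produces: each row is rendered as pipe-separated cells with a trailing "|" and
# no horizontal rules, which displays cleanly inside Slack code blocks, e.g.
#   tabulate([["tests/test_foo.py", 2]], headers=["Test Location", "Num Failed"],
#            tablefmt=hf_table_format, stralign="right")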
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
with open(log, "r") as f:
for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
    err = "Too many failed tests, please see the full report in the Action results."
    offset = len(err) + 10
    message = message[: 3000 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
    message = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
        action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
        date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 67
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    '''simple docstring'''
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    '''simple docstring'''
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    '''simple docstring'''
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    '''simple docstring'''
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
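    # Editorial sanity check (not part of the original script): expected
    # traversals for the fixed five-node tree built by make_tree().
    example_tree = make_tree()
    assert preorder(example_tree) == [1, 2, 4, 5, 3]
    assert inorder(example_tree) == [4, 2, 5, 1, 3]
    assert postorder(example_tree) == [4, 5, 2, 3, 1]
    assert level_order(example_tree) == [1, 2, 3, 4, 5]
    assert zigzag(example_tree) == [[1], [3, 2], [4, 5]]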
| 68
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
    def batch_encode_candidates(self, text, **kwargs) -> BatchEncoding:
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
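# A minimal usage sketch (editorial; mirrors the upstream docstring example, so
# treat the checkpoint name and shapes as assumptions):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch_text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   batch = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
#   # each key in `batch` stacks the per-candidate encodings for one question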
| 301
| 0
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCAmelCase ( ) -> str:
snake_case_ = 10
snake_case_ = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
snake_case_ = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(UpperCAmelCase ) ),
} , features=UpperCAmelCase , )
return dataset
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=UpperCAmelCase )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt'
snake_case_ = FILE_CONTENT
with open(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase )
return filename
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
    import bz2
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
snake_case_ = bytes(UpperCAmelCase , 'utf-8' )
    with bz2.open(UpperCAmelCase , 'wb' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Union[str, Any]:
import gzip
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
snake_case_ = bytes(UpperCAmelCase , 'utf-8' )
with gzip.open(UpperCAmelCase , 'wb' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
snake_case_ = bytes(UpperCAmelCase , 'utf-8' )
        with lz4.frame.open(UpperCAmelCase , 'wb' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(UpperCAmelCase , 'w' ) as archive:
archive.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
import tarfile
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(UpperCAmelCase , 'w' ) as f:
f.add(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
import lzma
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
snake_case_ = bytes(UpperCAmelCase , 'utf-8' )
with lzma.open(UpperCAmelCase , 'wb' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
import zipfile
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
snake_case_ = bytes(UpperCAmelCase , 'utf-8' )
with zstd.open(UpperCAmelCase , 'wb' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> int:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'file.xml'
snake_case_ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase )
return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def UpperCAmelCase ( ) -> Any:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
snake_case_ = datasets.Dataset.from_dict(UpperCAmelCase )
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(UpperCAmelCase ) ) as con:
snake_case_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(UpperCAmelCase , 'w' , newline='' ) as f:
snake_case_ = csv.DictWriter(UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> int:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(UpperCAmelCase , 'w' , newline='' ) as f:
snake_case_ = csv.DictWriter(UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
    import bz2
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(UpperCAmelCase , 'rb' ) as f:
snake_case_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCAmelCase , 'wb' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(UpperCAmelCase ) ) )
f.write(UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
snake_case_ = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(UpperCAmelCase , 'wb' ) as f:
snake_case_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase )
snake_case_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCAmelCase ) )] for k in DATA[0]} , schema=UpperCAmelCase )
writer.write_table(UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
snake_case_ = {'data': DATA}
with open(UpperCAmelCase , 'w' ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
snake_case_ = {'data': DATA_DICT_OF_LISTS}
with open(UpperCAmelCase , 'w' ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(UpperCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(UpperCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
import gzip
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
import gzip
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(UpperCAmelCase ) ) )
f.write(UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(UpperCAmelCase , 'w' ) as f:
f.add(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
f.add(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(UpperCAmelCase , 'w' ) as f:
f.add(UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = ['0', '1', '2', '3']
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = ['0', '1', '2', '3']
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = ['0', '1', '2', '3']
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(UpperCAmelCase ) ) )
f.write(UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
snake_case_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( ) -> Optional[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCAmelCase ( ) -> str:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
snake_case_ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(UpperCAmelCase , 'w' ) as f:
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ) )
f.write(UpperCAmelCase , arcname=os.path.basename(UpperCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
snake_case_ = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
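# A minimal usage sketch (editorial; `csv_path` is a hypothetical fixture name,
# since this copy has lost the original fixture identifiers): pytest injects
# these session-scoped fixtures into tests by parameter name, e.g.
#
#   def test_csv_dataset(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path)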
| 69
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
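# Editorial note: this is the classic "spiral primes" setup. For a ring of side
# length j + 2, the loop above checks the three corner values that are not
# perfect squares (j*j + (j + 1), j*j + 2*(j + 1) and j*j + 3*(j + 1)), since
# the fourth corner (j + 2)**2 is a square and can never be prime. A spiral of
# side length j has 2 * j - 1 numbers along its two diagonals, which is the
# denominator in the stopping condition.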
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
| 0
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A__ : Tuple =logging.get_logger(__name__)
A__ : List[Any] ={
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""" )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
        if self.use_past:
            num_encoder_layers, _ = self.num_layers
            for i in range(num_encoder_layers):
                common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
| 70
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
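# A minimal usage sketch (editorial; the environment variable names below are
# just illustrative, not ones this module defines):
#
#   world_size = get_int_from_env(["WORLD_SIZE", "OMPI_COMM_WORLD_SIZE"], 1)
#   debug = parse_flag_from_env("MY_TOOL_DEBUG", default=False)
#   mode = parse_choice_from_env("MY_TOOL_MODE", default="no")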
| 301
| 0
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os ,_PatchedModuleObj )
assert isinstance(_test_patching.os.path ,_PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path ,_PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
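# Editorial note: patch_submodule wraps every module along the dotted path in a
# _PatchedModuleObj, so lookups such as _test_patching.os.path.join resolve to
# the mock while the real os module itself stays untouched; start()/stop()
# (which the context-manager form above relies on) apply and undo the patch.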
| 71
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count reduced proper fractions n/d with d <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit, computed with
    a sieve: whenever phi[i] == i - 1, i is prime, so every multiple j of i
    loses the residues divisible by i."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
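# Quick cross-check of the sieve against a brute-force totient (hypothetical
# helper, added for illustration): for d <= 8 there are 21 reduced proper
# fractions, the example given in the Project Euler 72 statement.
from math import gcd


def phi_bruteforce(n: int) -> int:
    # count 1 <= k < n with gcd(n, k) == 1
    return sum(1 for k in range(1, n) if gcd(n, k) == 1)


assert solution(8) == sum(phi_bruteforce(d) for d in range(2, 9)) == 21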
if __name__ == "__main__":
print(solution())
| 301
| 0
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Fit an ordinary-least-squares plane via the normal equations and
    predict the next value."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
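# Note on the closed-form solve above: inverting x.T @ x is numerically fragile
# when the design matrix is ill-conditioned; np.linalg.lstsq computes the same
# least-squares coefficients more robustly. A tiny equivalence sketch on
# synthetic data (not the project's dataset):
_x = np.array([[1.0, 1, 2], [1.0, 2, 3], [1.0, 3, 5], [1.0, 4, 4]])
_y = np.array([1.0, 2.0, 3.0, 4.0])
_beta_normal = np.dot(np.dot(np.linalg.inv(np.dot(_x.T, _x)), _x.T), _y)
_beta_lstsq = np.linalg.lstsq(_x, _y, rcond=None)[0]
assert np.allclose(_beta_normal, _beta_lstsq)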
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Fit a seasonal ARIMA model with the match counts as an exogenous
    regressor and forecast one step ahead."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Fit an RBF-kernel SVR on (date, match) pairs and predict the next value."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: a forecast counts as safe when it does not overshoot the
    actual value and lies within 0.1 of it."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
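# Illustration of the voting semantics with synthetic numbers: a forecast that
# overshoots the actual value is always unsafe; otherwise it is safe when it
# lies within 0.1 of the actual value, and the majority decides.
assert data_safety_checker([0.45, 0.42, 0.60], 0.5) is True   # 2 safe vs 1 unsafe
assert data_safety_checker([0.80, 0.90, 0.45], 0.5) is False  # 1 safe vs 2 unsafe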
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
data_input_df = pd.DataFrame(
    data_input, columns=["total_user", "total_even", "days"]
)
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear regression & sarimax
trn_date = total_date[: len(total_date) - 1]
trn_user = total_user[: len(total_user) - 1]
trn_match = total_match[: len(total_match) - 1]
tst_date = total_date[len(total_date) - 1 :]
tst_user = total_user[len(total_user) - 1 :]
tst_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
print(f"Today's data is {not_str}safe.")
| 72
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 80_0800, degree: int = 80_0800) -> int:
    """Project Euler 800: count hybrid-integers p^q * q^p (p, q distinct primes)
    that do not exceed base^degree, comparing in log2 space to avoid huge ints."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
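# Cross-check sketch for the log-space trick above: p**q * q**p <= n is
# equivalent to q*log2(p) + p*log2(q) <= log2(n), which is what the two-pointer
# loop evaluates without building huge integers. The helper below is
# hypothetical, added only to validate a small case; 72 = 2^3 * 3^2 and
# 800 = 2^5 * 5^2 are the only hybrid-integers up to 800.
def hybrid_integers_bruteforce(max_value: int) -> int:
    primes = calculate_prime_numbers(100)
    return sum(
        1
        for index, p in enumerate(primes)
        for q in primes[index + 1 :]
        if p**q * q**p <= max_value
    )


assert hybrid_integers_bruteforce(800) == 2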
| 301
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 73
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="""Model has no tokens embeddings""" )
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 301
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
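# Shape intuition for the splits above (synthetic tensors, hidden size 256):
# PyTorch's MultiheadAttention stores the query/key/value projections stacked
# into a single (3*256, 256) in_proj matrix, so rows [0:256] are the query
# weights, rows [256:512] the key weights, and the last 256 rows the values.
_in_proj = torch.randn(3 * 256, 256)
_q, _k, _v = _in_proj[:256, :], _in_proj[256:512, :], _in_proj[-256:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _in_proj)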
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'nielsr/{model_name}' )
processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
_lowercase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute Gamma(num) for positive integers and half-integers via the
    recurrence Gamma(x) = (x - 1) * Gamma(x - 1), with Gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
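# Spot check for half-integers: the recursion should reproduce the identity
# Gamma(n + 1/2) = (2n)! / (4**n * n!) * sqrt(pi), e.g. Gamma(3.5) = 15/8 * sqrt(pi).
assert abs(gamma(3.5) - (15 / 8) * sqrt(pi)) < 1e-12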
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
    num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
a_ : Union[str, Any] = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
a_ : List[str] = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
a_ : Tuple = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
a_ : Union[str, Any] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
a_ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
a_ : Union[str, Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
a_ : Optional[int] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
a_ : Tuple = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
a_ : List[Any] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
a_ : Tuple = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
a_ : List[str] = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
a_ : int = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
a_ : Union[str, Any] = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
a_ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
a_ : Optional[int] = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 75
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
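# Illustration of the detection logic above with hypothetical env values (kept
# as comments so nothing mutates os.environ at import time):
#
#   os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#   os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
#
# With both set, is_sagemaker_model_parallel_available() still returns True only
# if the `smdistributed` package is importable.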
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def A__ ( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , snake_case_ , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase = torch.device("""cpu""" )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device("""cuda""" , snake_case_ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case_ )
return device
@property
def A__ ( self ) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> Tuple:
return False
| 301
| 0
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a_ = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Processor wrapping a Bark tokenizer and optional speaker embeddings."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def __UpperCamelCase ( cls : Optional[int] , a : Optional[Any] , a : Any="speaker_embeddings_path.json" , **a : List[str] ) -> List[str]:
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = get_file_from_repo(
a , a , subfolder=kwargs.pop("subfolder" , a ) , cache_dir=kwargs.pop("cache_dir" , a ) , force_download=kwargs.pop("force_download" , a ) , proxies=kwargs.pop("proxies" , a ) , resume_download=kwargs.pop("resume_download" , a ) , local_files_only=kwargs.pop("local_files_only" , a ) , use_auth_token=kwargs.pop("use_auth_token" , a ) , revision=kwargs.pop("revision" , a ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(a , a )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
else:
with open(a ) as speaker_embeddings_json:
SCREAMING_SNAKE_CASE : str = json.load(a )
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(a , **a )
return cls(tokenizer=a , speaker_embeddings=a )
def __UpperCamelCase ( self : Optional[Any] , a : Tuple , a : Tuple="speaker_embeddings_path.json" , a : Union[str, Any]="speaker_embeddings" , a : bool = False , **a : List[str] , ) -> List[Any]:
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(a , a , "v2" ) , exist_ok=a )
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : int = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
SCREAMING_SNAKE_CASE : Tuple = self._load_voice_preset(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , a , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=a , )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(a , F"{prompt_key}_{key}.npy" )
SCREAMING_SNAKE_CASE : int = tmp_dict
with open(os.path.join(a , a ) , "w" ) as fp:
json.dump(a , a )
super().save_pretrained(a , a , **a )
def __UpperCamelCase ( self : List[str] , a : str = None , **a : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.speaker_embeddings[voice_preset]
SCREAMING_SNAKE_CASE : Any = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
SCREAMING_SNAKE_CASE : Optional[Any] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , a ) , cache_dir=kwargs.pop("cache_dir" , a ) , force_download=kwargs.pop("force_download" , a ) , proxies=kwargs.pop("proxies" , a ) , resume_download=kwargs.pop("resume_download" , a ) , local_files_only=kwargs.pop("local_files_only" , a ) , use_auth_token=kwargs.pop("use_auth_token" , a ) , revision=kwargs.pop("revision" , a ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
SCREAMING_SNAKE_CASE : Optional[int] = np.load(a )
return voice_preset_dict
def __UpperCamelCase ( self : Dict , a : Optional[dict] = None ) -> List[Any]:
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self : Union[str, Any] , a : Dict=None , a : Dict=None , a : Tuple="pt" , a : List[Any]=256 , a : Optional[Any]=False , a : Tuple=True , a : str=False , **a : List[str] , ) -> Tuple:
"""simple docstring"""
if voice_preset is not None and not isinstance(a , a ):
if (
isinstance(a , a )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
SCREAMING_SNAKE_CASE : str = self._load_voice_preset(a )
else:
if isinstance(a , a ) and not voice_preset.endswith(".npz" ):
SCREAMING_SNAKE_CASE : List[str] = voice_preset + ".npz"
SCREAMING_SNAKE_CASE : Dict = np.load(a )
if voice_preset is not None:
self._validate_voice_preset_dict(a , **a )
SCREAMING_SNAKE_CASE : Dict = BatchFeature(data=a , tensor_type=a )
SCREAMING_SNAKE_CASE : str = self.tokenizer(
a , return_tensors=a , padding="max_length" , max_length=a , return_attention_mask=a , return_token_type_ids=a , add_special_tokens=a , **a , )
if voice_preset is not None:
SCREAMING_SNAKE_CASE : Optional[int] = voice_preset
return encoded_text
| 76
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
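# A rough sketch of the lazy-import pattern used above (simplified; not the
# actual transformers._LazyModule): attribute access triggers the real import
# the first time a symbol is requested, so importing the package stays cheap.
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value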
| 301
| 0
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (deltas converted from bytes to MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
'''simple docstring'''
print(f"""{description}:""" )
print(f"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(f"""- GPU {i} allocated: {measures[str(_lowerCAmelCase )]:.2f}MiB""" )
lowercase__ : List[Any] = measures[f"""{i}-peak"""]
print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
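
# A minimal usage sketch of the helpers above (assumes a CUDA GPU is available; the
# matmul workload is illustrative only, not part of the utilities):
if __name__ == "__main__":
    start = start_measure()
    a = torch.randn(2048, 2048, device="cuda")
    b = a @ a
    measures = end_measure(start)
    log_measures(measures, "2048x2048 matmul")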
| 77
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info_splits(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
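
# A minimal sketch of the same helpers outside pytest (dataset names mirror the
# parametrized cases above; network access to the Hub is assumed):
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))  # e.g. ['plain_text']
    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']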
| 301
| 0
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
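
# A standalone check of the same saturation behavior (a sketch; `get_activation`
# accepts exactly the string names exercised above):
if __name__ == "__main__":
    act = get_activation("gelu")
    print(act(torch.tensor([-100.0, 0.0, 20.0])))  # ~tensor([0., 0., 20.])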
| 78
|
"""simple docstring"""
def solution(limit=1000000):
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoized chain lengths
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    print(solution(int(input().strip())))
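
# Sanity check of the memoized counting above: the chain for 13
# (13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1) has 10 terms, while below 14
# the longest chain (20 terms) starts at 9, so:
#
#     >>> solution(14)
#     9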
| 301
| 0
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either uint8 (leave the input as [0, 255]) or float32
        (rescale into [0.0, 1.0]); `seed` provides deterministic shuffling.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already present locally."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
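
# A minimal usage sketch (the first run downloads ~11 MB of MNIST files; the cache
# directory name below is an assumption):
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    images, labels = mnist.train.next_batch(32)
    print(images.shape, labels.shape)  # (32, 784) (32, 10)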
| 79
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)


def triangle(vertex1, vertex2, vertex3, depth):
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
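
# Rough cost model for the recursion above (a sketch, not part of the script): each
# call draws one outline and spawns three children, so depth d draws
# sum(3**k for k in range(d + 1)) triangles -- 1, 4, 13, 40, 121 for d = 0..4.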
| 301
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
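
# A minimal end-to-end usage sketch of the pipeline exercised above (weights download
# on first use; a CUDA GPU is assumed):
if __name__ == "__main__":
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
    )
    mask_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
    )
    result = pipe(prompt="Face of a yellow cat, high resolution", image=init_image, mask_image=mask_image)
    result.images[0].save("inpainted.png")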
| 80
|
"""simple docstring"""
def partition(m):
    """Return the number of integer partitions of `m` (OEIS A000041)."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
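
# Quick check of the convention above (p(5) = 7: 5, 4+1, 3+2, 3+1+1, 2+2+1,
# 2+1+1+1, 1+1+1+1+1):
#
#     >>> partition(5)
#     7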
| 301
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
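
# Sketch of the resulting import behavior (mirrors the standard transformers
# lazy-module pattern; the slow tokenizer needs sentencepiece, the fast one needs
# tokenizers):
#
#     from transformers import NllbTokenizerFast  # submodule imported on first access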
| 81
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE: relies on the module-level `support_list` set by the UI code below
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
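
# To serve this demo (standard Streamlit entry point; the file name is an assumption):
#
#     streamlit run eli5_app.py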
| 301
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def snake_case ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def snake_case ( self ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def snake_case ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_snake_case )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFCvtModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_img():
    """Load the default COCO test image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
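
# A minimal inference sketch mirroring the integration test above ("microsoft/cvt-13"
# is the first checkpoint in the pretrained archive list; the COCO URL below is an
# assumed stand-in for the local test fixture):
if __name__ == "__main__":
    import requests

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
    model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
    logits = model(**processor(images=image, return_tensors="tf")).logits
    print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])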
| 82
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Save model.generate results to <out_file> and return basic runtime statistics."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
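
# Example invocation for summarization (paths and file names are placeholders):
#
#     python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
#         --reference_path cnn_dm/test.target --score_path rouge.json --bs 16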
| 301
| 0
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """Image processor with the standard resize / center-crop / rescale / normalize
    pipeline; the (256 / 224) shortest-edge scaling below is the LeViT convention."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : int ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_UpperCamelCase : int = int((256 / 224) * size['shortest_edge'] )
_UpperCamelCase : int = get_resize_output_image_size(lowerCamelCase__ ,size=lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
_UpperCamelCase : Dict = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
lowerCamelCase__ ,size=(size_dict['height'], size_dict['width']) ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : List[str] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[str] ,):
'''simple docstring'''
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Any ,):
'''simple docstring'''
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Dict[str, int]] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Dict[str, int]] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[float] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[float, Iterable[float]]] = None ,lowerCamelCase__ : Optional[Union[float, Iterable[float]]] = None ,lowerCamelCase__ : Optional[TensorType] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : Optional[int] ,):
'''simple docstring'''
_UpperCamelCase : Any = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Any = resample if resample is not None else self.resample
_UpperCamelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCamelCase : int = size if size is not None else self.size
_UpperCamelCase : Any = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
_UpperCamelCase : Any = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : List[Any] = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
_UpperCamelCase : Optional[int] = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_UpperCamelCase : Union[str, Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_UpperCamelCase : Dict = [self.resize(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
if do_center_crop:
_UpperCamelCase : Dict = [self.center_crop(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase : int = [self.rescale(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
if do_normalize:
_UpperCamelCase : Union[str, Any] = [self.normalize(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
_UpperCamelCase : Optional[Any] = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
_UpperCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
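# A minimal usage sketch for the processor above (the class name is inferred from the LeViT-style
# 256/224 shortest-edge resize; the random image is a stand-in for real data):
#     processor = LevitImageProcessor()
#     batch = processor(images=np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop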
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
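# Sketch of the returned structure for a hypothetical two-class info file:
#     prepare_metadata("ade20k_panoptic.json")
#     # -> {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}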
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        # mask slices reconstructed to be consistent with the asserted run-length values below
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
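    # For intuition: binary_mask_to_rle flattens the mask row-major and emits (start, length)
    # pairs for each foreground run, so the mask above encodes to [21, 45, 251, 10]: a 45-pixel
    # run starting at (1-indexed) pixel 21, then a 10-pixel run at pixel 251. Only the first two
    # values are asserted; the remaining pair follows from the same convention.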
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
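    # Usage note: with apply_ocr=True the processor returns Tesseract `words` and 0-1000
    # normalized `boxes` alongside pixel_values; with apply_ocr=False only pixel_values come
    # back, as the two branches of the integration test above demonstrate.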
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
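# A minimal usage sketch (checkpoint name is an assumption): each auto class resolves a
# checkpoint's config type to its Flax architecture through the lazy mappings above, e.g.
#     from transformers import FlaxAutoModelForSeq2SeqLM
#     model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-small")  # -> FlaxT5ForConditionalGeneration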
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1_000))

        input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.')
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (non-preemptive shortest-job-first)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed:
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process is its burst time plus its waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest-order term first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the two polynomials
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
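# Worked example (added for illustration): multiplying (1 + 2x + 3x^2) by
# (4 + 5x + 6x^2) should give 4 + 13x + 28x^2 + 27x^3 + 18x^4.
#
#     fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5, 6])
#     print(fft)  # prints A, B and A*B via __str__ above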
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4_398, 25, 21, 1_289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `__lowerCAmelCase` is the long expected-encoding literal defined above
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g.: seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__A = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BeitFeatureExtractor"]
__A = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the character of a string by placing each of them in a grid (the
    height is dependent on the key) in a zigzag formation and reading it left to
    right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key and fills it in with the characters
    of the input string, then reads it in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
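# Worked example (added for illustration): for ratio 0.5 the loop stops at the
# 11x11 spiral, where 10 of the 21 numbers on both diagonals are prime
# (10/21 < 0.5), so:
#
#     solution(0.5)  # -> 11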
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a__ ( snake_case__ ):
def __init__( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = params
__lowerCAmelCase = np.array(_A )
__lowerCAmelCase = np.array([len(_A ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _A ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.params.max_model_input_size
__lowerCAmelCase = self.lengths > max_len
logger.info(f"""Splitting {sum(_A )} too long sequences.""" )
def divide_chunks(_A , _A ):
return [l[i : i + n] for i in range(0 , len(_A ) , _A )]
__lowerCAmelCase = []
__lowerCAmelCase = []
if self.params.mlm:
__lowerCAmelCase , __lowerCAmelCase = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
__lowerCAmelCase , __lowerCAmelCase = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__lowerCAmelCase = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = len(self )
__lowerCAmelCase = self.lengths > 1_1
__lowerCAmelCase = self.token_ids[indices]
__lowerCAmelCase = self.lengths[indices]
__lowerCAmelCase = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__lowerCAmelCase = self.params.special_tok_ids["unk_token"]
__lowerCAmelCase = len(self )
__lowerCAmelCase = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__lowerCAmelCase = (unk_occs / self.lengths) < 0.5
__lowerCAmelCase = self.token_ids[indices]
__lowerCAmelCase = self.lengths[indices]
__lowerCAmelCase = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = [t[0] for t in batch]
__lowerCAmelCase = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
# Pad token ids
if self.params.mlm:
__lowerCAmelCase = self.params.special_tok_ids["pad_token"]
else:
__lowerCAmelCase = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(lengths )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
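    # Intended as a DataLoader collate_fn, e.g.
    # DataLoader(dataset, batch_size=..., collate_fn=dataset.batch_sequences)
    # (the method name above is mangled in this dump; in the original
    # distillation utilities it is `batch_sequences`).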
| 92
|
"""simple docstring"""
import os
from distutils.util import strtobool
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
if val >= 0:
return val
return default
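# Illustrative: with WORLD_SIZE=4 exported, passing ["WORLD_SIZE", "SLURM_NTASKS"]
# as the key list returns 4; if none of the keys is set, the supplied default wins.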
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def lowercase (_lowerCAmelCase , _lowerCAmelCase="no" ):
    value = os.environ.get(key , str(default ) )
return value
| 301
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Dict = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''git_vision_model'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2_24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE="quick_gelu" , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(**kwargs )
lowercase_ : Any = hidden_size
lowercase_ : Any = intermediate_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Tuple = num_attention_heads
lowercase_ : Union[str, Any] = num_channels
lowercase_ : Tuple = patch_size
lowercase_ : Union[str, Any] = image_size
lowercase_ : int = initializer_range
lowercase_ : Optional[Any] = attention_dropout
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
lowercase_ : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''git'''
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_05_22 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10_24 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-1_2 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1_01 , __SCREAMING_SNAKE_CASE=1_02 , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
if vision_config is None:
lowercase_ : str = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
        self.vision_config = GitVisionConfig(**vision_config )
lowercase_ : Dict = vocab_size
lowercase_ : int = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : str = max_position_embeddings
lowercase_ : Tuple = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : int = position_embedding_type
lowercase_ : Dict = use_cache
lowercase_ : List[str] = tie_word_embeddings
lowercase_ : Union[str, Any] = num_image_with_embedding
lowercase_ : int = bos_token_id
lowercase_ : int = eos_token_id
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
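# Illustrative usage (class names are mangled in this dump; in transformers these
# are GitVisionConfig and GitConfig): instantiating the composite config with no
# arguments builds a default vision sub-config, passing
# vision_config={"hidden_size": 512} overrides only the vision tower, and
# to_dict() above re-serializes both configs.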
| 93
|
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime, so apply Euler's product formula to its multiples
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
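# Illustrative: the sum counts the reduced proper fractions n/d with d <= limit
# (Project Euler 72); e.g. for limit = 8 the result is 21.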
if __name__ == "__main__":
print(solution())
| 301
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : Dict = '''facebook/wmt19-en-de'''
snake_case : List[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : Optional[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
snake_case : Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
snake_case : Union[str, Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
snake_case : int = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
snake_case : Optional[int] = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 94
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number ):
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
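# The search below works in log space: p**q * q**p <= base**degree is equivalent
# to q * log2(p) + p * log2(q) <= degree * log2(base), which avoids huge integers.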
def solution(base = 80_0800 , degree = 80_0800 ):
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
while left < right:
while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 301
| 0
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation( img : np.ndarray , pt1 : np.ndarray , pt2 : np.ndarray , rows : int , cols : int ):
    """Warp the image by the affine map that sends the three points pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1 , pt2 )
    return cv2.warpAffine(img , matrix , (rows, cols) )
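# Illustrative: cv2.getAffineTransform maps three source points onto three target
# points, so each consecutive pair of point sets below encodes one
# rotate/scale/shear warp of the grayscale image.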
if __name__ == "__main__":
# read original image
    image = cv2.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
    img_rows, img_cols = gray_img.shape
# set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list (point pairings restored; the mangled
    # source collapsed pts1..pts4 into a single name)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
| 95
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
        __lowerCAmelCase = self.prepare_autoformer_inputs_dict(config )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
        model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 301
| 0
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self ):
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , collection , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
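    # Illustrative complexity note: update and query_range each touch O(log n)
    # nodes, while traverse yields all 2n - 1 nodes in breadth-first order.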
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
lowercase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 96
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
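# Illustrative: gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ~= 3.323, unwinding the
# recurrence gamma(n) = (n - 1) * gamma(n - 1) down to gamma(0.5) = sqrt(pi).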
def test_gamma ():
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 97
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available ():
# Get the sagemaker specific mp parameters from smp_options variable.
__lowerCAmelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__lowerCAmelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
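# Illustrative: SM_HP_MP_PARAMETERS typically carries a JSON blob such as
# '{"partitions": 2, "microbatches": 4}' -- only the presence of the
# "partitions" key is checked above.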
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def A__ ( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , snake_case_ , )
@cached_property
def A__ ( self ) -> "torch.device":
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__lowerCAmelCase = torch.device("""cpu""" )
__lowerCAmelCase = 0
elif is_sagemaker_model_parallel_available():
__lowerCAmelCase = smp.local_rank()
__lowerCAmelCase = torch.device("""cuda""" , snake_case_ )
__lowerCAmelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__lowerCAmelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__lowerCAmelCase = torch.device("""cuda""" , self.local_rank )
__lowerCAmelCase = 1
if device.type == "cuda":
torch.cuda.set_device(snake_case_ )
return device
@property
def A__ ( self ) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> Tuple:
return False
| 301
| 0
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowerCAmelCase__ : Union[str, Any] = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close( source , target ):
    return (abs(source - target ) / target) < 0.01
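# e.g. is_1percent_close(100.5, 100) is True, while is_1percent_close(102, 100) is False.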
@pytest.mark.integration
def test_test_command( dataset_loading_script_dir ):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir , 'README.md' )
    assert os.path.exists(readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_3_5_1_5_6_3,
'num_examples': 1_0_0_0_0,
},
{
'name': 'validation',
'num_bytes': 2_3_8_4_1_8,
'num_examples': 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['default'] , key ), getattr(expected_dataset_infos['default'] , key )
        if key == "num_bytes":
            assert is_1percent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 98
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
| 0
|
from __future__ import annotations
import queue
class TreeNode:
"""simple docstring"""
    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'Enter the left node of {node_found.data}: '
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'Enter the right node of {node_found.data}: '
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
raise
def pre_order( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def in_order( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def post_order( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def level_order( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
    q: queue.Queue = queue.Queue()
    q.put(node )
while not q.empty():
        node_dequeued = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
    q: queue.Queue = queue.Queue()
    q.put(node )
while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right
def post_order_iter( node ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=',' )
def prompt( s = "" , width=50 , char="*" ) -> str:
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return F'{left * char} {s} {(left + extra) * char}'
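# e.g. prompt("Binary Tree Traversals") returns 13 stars, the title wrapped in
# spaces, then 13 more stars -- a 50-character banner.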
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
lowercase : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 99
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset (path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric (path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info (path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error (path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names (path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos (path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info (path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error (path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 301
| 0
|
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
    def _create_dummy_data( self , data_dir):
        os.makedirs(data_dir , exist_ok=True)
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 1_2, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir , f"{split}.{field}") , """w""") as f:
                    f.write(content)
    def _run_finetune( self , gpus , distributed_retriever = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , """output""")
        data_dir = os.path.join(tmp_dir , """data""")
        self._create_dummy_data(data_dir=data_dir)
__SCREAMING_SNAKE_CASE = f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}")
if is_apex_available():
testargs.append("""--fp16""")
else:
testargs.append("""--gpus=0""")
testargs.append("""--distributed_backend=ddp_cpu""")
testargs.append("""--num_processes=2""")
        cli_args = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cli_args , env=self.get_env())
        metrics_save_path = os.path.join(output_dir , """metrics.json""")
        with open(metrics_save_path) as f:
            result = json.load(f)
return result
@require_torch_gpu
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self._run_finetune(gpus=1)
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2)
@require_torch_multi_gpu
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self._run_finetune(gpus=2)
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2)
@require_torch_gpu
@require_ray
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self._run_finetune(gpus=1 , distributed_retriever="""ray""")
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2)
@require_torch_multi_gpu
@require_ray
def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = self._run_finetune(gpus=2 , distributed_retriever="""ray""")
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2)
| 100
|
"""simple docstring"""
def solution(limit = 100_0000 ):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , limit ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
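# Illustrative: below one million the longest Collatz chain starts at 837799
# (a chain of 525 terms), which is what solution() returns with the default limit.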
if __name__ == "__main__":
print(solution(int(input().strip())))
| 301
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = tempfile.mkdtemp()
lowercase = SamImageProcessor()
lowercase = SamProcessor(A__)
processor.save_pretrained(self.tmpdirname)
def A__ ( self ,**A__):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A__).image_processor
def A__ ( self):
shutil.rmtree(self.tmpdirname)
def A__ ( self):
        lowercase = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uint8)]
lowercase = [Image.fromarray(np.moveaxis(A__ ,0 ,-1)) for x in image_inputs]
return image_inputs
def A__ ( self):
lowercase = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowercase = self.get_image_processor(do_normalize=A__ ,padding_value=1.0)
lowercase = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=A__ ,padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor ,A__)
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=A__)
lowercase = self.prepare_image_inputs()
lowercase = image_processor(A__ ,return_tensors='''np''')
lowercase = processor(images=A__ ,return_tensors='''np''')
input_feat_extract.pop('''original_sizes''') # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2)
@require_torch
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=A__)
lowercase = [torch.ones((1, 3, 5, 5))]
lowercase = [[1_7_6_4, 2_6_4_6]]
lowercase = [[6_8_3, 1_0_2_4]]
lowercase = processor.post_process_masks(A__ ,A__ ,A__)
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
lowercase = processor.post_process_masks(
A__ ,torch.tensor(A__) ,torch.tensor(A__))
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
# should also work with np
lowercase = [np.ones((1, 3, 5, 5))]
lowercase = processor.post_process_masks(A__ ,np.array(A__) ,np.array(A__))
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
lowercase = [[1, 0], [0, 1]]
with self.assertRaises(A__):
lowercase = processor.post_process_masks(A__ ,np.array(A__) ,np.array(A__))
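# A hedged end-to-end sketch of the processor exercised above (the checkpoint
# name, the prompt point, and the output attribute are illustrative
# assumptions, not taken from the tests):
def _example_sam_inference(image):
    import torch
    from transformers import SamModel, SamProcessor

    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    model = SamModel.from_pretrained("facebook/sam-vit-base")
    inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # map the low-resolution predicted masks back to the original image size
    return processor.post_process_masks(
        outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
    )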
@require_vision
@require_tf
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = tempfile.mkdtemp()
lowercase = SamImageProcessor()
lowercase = SamProcessor(A__)
processor.save_pretrained(self.tmpdirname)
def A__ ( self ,**A__):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A__).image_processor
def A__ ( self):
shutil.rmtree(self.tmpdirname)
def A__ ( self):
        lowercase = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uint8)]
lowercase = [Image.fromarray(np.moveaxis(A__ ,0 ,-1)) for x in image_inputs]
return image_inputs
def A__ ( self):
lowercase = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowercase = self.get_image_processor(do_normalize=A__ ,padding_value=1.0)
lowercase = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=A__ ,padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor ,A__)
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=A__)
lowercase = self.prepare_image_inputs()
lowercase = image_processor(A__ ,return_tensors='''np''')
lowercase = processor(images=A__ ,return_tensors='''np''')
input_feat_extract.pop('''original_sizes''') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2)
@require_tf
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=A__)
lowercase = [tf.ones((1, 3, 5, 5))]
lowercase = [[1_7_6_4, 2_6_4_6]]
lowercase = [[6_8_3, 1_0_2_4]]
lowercase = processor.post_process_masks(A__ ,A__ ,A__ ,return_tensors='''tf''')
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
lowercase = processor.post_process_masks(
A__ ,tf.convert_to_tensor(A__) ,tf.convert_to_tensor(A__) ,return_tensors='''tf''' ,)
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
# should also work with np
lowercase = [np.ones((1, 3, 5, 5))]
lowercase = processor.post_process_masks(
A__ ,np.array(A__) ,np.array(A__) ,return_tensors='''tf''')
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
lowercase = processor.post_process_masks(
A__ ,np.array(A__) ,np.array(A__) ,return_tensors='''tf''')
@require_vision
@require_torchvision
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = tempfile.mkdtemp()
lowercase = SamImageProcessor()
lowercase = SamProcessor(A__)
processor.save_pretrained(self.tmpdirname)
def A__ ( self ,**A__):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A__).image_processor
def A__ ( self):
shutil.rmtree(self.tmpdirname)
def A__ ( self):
        lowercase = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uint8)]
lowercase = [Image.fromarray(np.moveaxis(A__ ,0 ,-1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=A__)
        lowercase = np.random.randint(0 ,2 ,size=(1, 3, 5, 5)).astype(np.float32)
lowercase = [tf.convert_to_tensor(A__)]
lowercase = [torch.tensor(A__)]
lowercase = [[1_7_6_4, 2_6_4_6]]
lowercase = [[6_8_3, 1_0_2_4]]
lowercase = processor.post_process_masks(
A__ ,A__ ,A__ ,return_tensors='''tf''')
lowercase = processor.post_process_masks(
A__ ,A__ ,A__ ,return_tensors='''pt''')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=A__)
lowercase = self.prepare_image_inputs()
lowercase = image_processor(A__ ,return_tensors='''pt''')['''pixel_values'''].numpy()
lowercase = processor(images=A__ ,return_tensors='''pt''')['''pixel_values'''].numpy()
lowercase = image_processor(A__ ,return_tensors='''tf''')['''pixel_values'''].numpy()
lowercase = processor(images=A__ ,return_tensors='''tf''')['''pixel_values'''].numpy()
self.assertTrue(np.allclose(A__ ,A__))
self.assertTrue(np.allclose(A__ ,A__))
self.assertTrue(np.allclose(A__ ,A__))
| 101
|
"""simple docstring"""
import sys
import turtle
def get_mid(point_a, point_b):
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2


def triangle(vertexa, vertexb, vertexc, depth):
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
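# Each recursion level replaces a triangle with three half-scale copies, so a
# drawing of depth d traces 3**d smallest triangles. A tiny helper
# illustrating that count (purely explanatory, not used by the drawing code):
def sierpinski_triangle_count(depth: int) -> int:
    return 3**depth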
| 301
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=12_81_12,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=1_28,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
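# A hedged usage sketch for the configuration above: build a small randomly
# initialized model from a fresh config (NllbMoeForConditionalGeneration is
# the model class transformers pairs with this config; the reduced sizes are
# illustrative assumptions to keep the example cheap):
def _example_build_model():
    from transformers import NllbMoeForConditionalGeneration

    config = NllbMoeConfig(num_experts=8, encoder_layers=2, decoder_layers=2, d_model=64)
    return NllbMoeForConditionalGeneration(config)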
| 102
|
"""simple docstring"""
def partition(m):
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
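# A quick brute-force cross-check sketch (assumes partition(m) counts the
# ways to write m as a sum of at least two positive integers, as in Project
# Euler 76; e.g. both should give 6 for m = 5):
def partitions_brute_force(m, max_part=None):
    # number of partitions of m into parts no larger than max_part
    if max_part is None:
        max_part = m - 1  # "at least two parts" means the largest part is < m
    if m == 0:
        return 1
    return sum(partitions_brute_force(m - k, k) for k in range(1, min(max_part, m) + 1))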
| 301
| 0
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Set prev as the new head in order to put the old head at the end
        self.head = prev
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
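# Complexity note for the list above: insert_nth and delete_nth walk the
# chain, so they are O(index); __len__ and __repr__ traverse the whole list,
# O(n); and reverse() flips the links in one O(n) pass with O(1) extra space:
#
#   ll = LinkedList()
#   for v in (1, 2, 3):
#       ll.insert_tail(v)
#   ll.reverse()
#   assert str(ll) == "3->2->1"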
| 103
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
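# Note on the @st.cache decorator above: hash_funcs maps types Streamlit
# cannot hash cheaply to a custom hasher, and returning None tells the cache
# to ignore those arguments when building the cache key, so repeated queries
# reuse the loaded model instead of re-hashing multi-GB weights on every
# rerun. A minimal generic sketch of the same pattern (MyHeavyModel is a
# hypothetical placeholder type):
#
#   @st.cache(hash_funcs={MyHeavyModel: lambda _: None})
#   def predict(model: MyHeavyModel, text: str) -> str:
#       return model(text)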
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 0
|
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """simple docstring"""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
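# An equivalent one-line sketch using string conversion instead of repeated
# divmod arithmetic (same result, often easier to read):
def solution_via_str(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))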
| 104
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
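    # Usage for summarization (illustrative paths, mirroring the MT example above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization $@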
| 301
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 200_0000) -> int:
    '''simple docstring'''
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
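# Why the quadratic formula appears above: for a fixed first side a with
# triangle number T(a), we want T(b) close to target / T(a). Since
# T(b) = b * (b + 1) / 2, setting b^2 + b - 2 * target / T(a) = 0 and solving
# for the positive root gives b = (-1 + sqrt(1 + 8 * target / T(a))) / 2,
# and the best integer candidates are floor(b) and ceil(b).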
| 105
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class OneFormerImageProcessorTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__(self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self , image_inputs , batched=False ) -> Dict:
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
    def setUp(self ) -> str:
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
    def image_processor_dict(self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ) -> Union[str, Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """image_mean""" ) )
        self.assertTrue(hasattr(image_processor , """image_std""" ) )
        self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
        self.assertTrue(hasattr(image_processor , """ignore_index""" ) )
        self.assertTrue(hasattr(image_processor , """class_info_file""" ) )
        self.assertTrue(hasattr(image_processor , """num_text""" ) )
        self.assertTrue(hasattr(image_processor , """repo_path""" ) )
        self.assertTrue(hasattr(image_processor , """metadata""" ) )
        self.assertTrue(hasattr(image_processor , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
    def test_call_pil(self ) -> Union[str, Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy(self ) -> List[str]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch(self ) -> Tuple:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
    def test_call_with_segmentation_maps(self ) -> Optional[Any]:
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs["""mask_labels"""]
            class_labels = inputs["""class_labels"""]
            pixel_values = inputs["""pixel_values"""]
            text_inputs = inputs["""text_inputs"""]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="""pil""" )
        common(is_instance_map=True , segmentation_type="""pil""" )
    def test_binary_mask_to_rle(self ) -> Optional[int]:
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
    def test_post_process_semantic_segmentation(self ) -> Optional[Any]:
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation(self ) -> Union[str, Any]:
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("""segmentation""" in el )
            self.assertTrue("""segments_info""" in el )
            self.assertEqual(type(el["""segments_info"""] ) , list )
            self.assertEqual(
                el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation(self ) -> Union[str, Any]:
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("""segmentation""" in el )
            self.assertTrue("""segments_info""" in el )
            self.assertEqual(type(el["""segments_info"""] ) , list )
            self.assertEqual(
                el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 301
| 0
|
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE ( Scene ):
    """simple docstring"""
    def construct( self : List[Any] ):
lowerCAmelCase__ : List[Any] = Rectangle(height=0.5 ,width=0.5 )
lowerCAmelCase__ : Union[str, Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Dict = Rectangle(height=0.25 ,width=0.25 )
lowerCAmelCase__ : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[Any] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Optional[int] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Dict = VGroup(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Dict = Text('''CPU''' ,font_size=2_4 )
lowerCAmelCase__ : Optional[int] = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
lowerCAmelCase__ : str = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : Dict = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : List[Any] = Text('''GPU''' ,font_size=2_4 )
lowerCAmelCase__ : List[str] = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : str = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : int = Text('''Model''' ,font_size=2_4 )
lowerCAmelCase__ : Optional[int] = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : Tuple = []
for i, rect in enumerate(lowercase_ ):
lowerCAmelCase__ : Optional[Any] = fill.copy().set_fill(lowercase_ ,opacity=0.8 )
target.move_to(lowercase_ )
model_arr.append(lowercase_ )
lowerCAmelCase__ : int = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(lowercase_ ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowercase_ )
self.add(*lowercase_ ,*lowercase_ )
lowerCAmelCase__ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Tuple = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[int] = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Tuple = VGroup(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0 )
lowerCAmelCase__ : Dict = Text('''Disk''' ,font_size=2_4 )
lowerCAmelCase__ : int = Group(lowercase_ ,lowercase_ ).arrange(lowercase_ ,buff=0.5 ,aligned_edge=lowercase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : str = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Dict = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=1_8 ,)
blue_text.next_to(lowercase_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(lowercase_ )
lowerCAmelCase__ : List[str] = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) )
lowerCAmelCase__ : List[Any] = Square(0.3 )
input.set_fill(lowercase_ ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,lowercase_ ,buff=0.5 )
self.play(Write(lowercase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=lowercase_ ,buff=0.02 )
self.play(MoveToTarget(lowercase_ ) )
self.play(FadeOut(lowercase_ ) )
lowerCAmelCase__ : str = Arrow(start=lowercase_ ,end=lowercase_ ,color=lowercase_ ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,lowercase_ ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase__ : Tuple = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=2_4 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ,run_time=3 ) )
lowerCAmelCase__ : Optional[int] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(lowercase_ ) ,Circumscribe(model_arr[0] ,color=lowercase_ ,**lowercase_ ) ,Circumscribe(model_cpu_arr[0] ,color=lowercase_ ,**lowercase_ ) ,Circumscribe(gpu_rect[0] ,color=lowercase_ ,**lowercase_ ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase__ : List[Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,lowercase_ ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase__ : List[str] = AnimationGroup(
FadeOut(lowercase_ ,run_time=0.5 ) ,MoveToTarget(lowercase_ ,run_time=0.5 ) ,FadeIn(lowercase_ ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(lowercase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase__ : Union[str, Any] = 0.7
self.play(
Circumscribe(model_arr[i] ,**lowercase_ ) ,Circumscribe(cpu_left_col_base[i] ,**lowercase_ ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=lowercase_ ,**lowercase_ ) ,Circumscribe(gpu_rect[0] ,color=lowercase_ ,**lowercase_ ) ,Circumscribe(model_arr[i + 1] ,color=lowercase_ ,**lowercase_ ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=lowercase_ ,**lowercase_ ) ,Circumscribe(cpu_left_col_base[-1] ,color=lowercase_ ,**lowercase_ ) ,Circumscribe(gpu_rect[0] ,color=lowercase_ ,**lowercase_ ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase__ : List[str] = a_c
lowerCAmelCase__ : str = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(lowercase_ ) ,FadeOut(lowercase_ ,run_time=0.5 ) ,)
lowerCAmelCase__ : List[Any] = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ,run_time=3 ) ,MoveToTarget(lowercase_ ) )
self.wait()
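# Rendering sketch (assumes this scene is saved as stage_5.py and the manim
# CLI is installed; both names are illustrative):
#   manim -pql stage_5.py Stage5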
| 106
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
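# Usage sketch (the checkpoint name below is only an example):
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel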
| 301
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
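# Usage sketch (illustrative): any default can be overridden, and the
# attribute_map above exposes the canonical config names.
#   config = OpenAIGPTConfig(n_layer=6)
#   assert config.num_hidden_layers == 6  # mapped to n_layer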
| 107
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
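    # Expected output for n=4, k=2 (all 2-element combinations of 1..4 in
    # lexicographic order):
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4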
| 301
| 0
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password generated with the required characters included; the rest is
    # split evenly between letters, digits and punctuation.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Passwords should contain UPPERCASE, lowercase, numbers and special
    # characters, and be at least `min_length` characters long.
    if len(password) < min_length:
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
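# Worked check of is_strong_password (verifiable by hand): "Hwea7$2!" has
# 8 characters with an uppercase letter, lowercase letters, digits and
# punctuation, so is_strong_password("Hwea7$2!") returns True.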
| 108
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
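# Usage sketch (needs the CUDA toolchain and ninja available at runtime; the
# extension is JIT-compiled on the first call):
#   MSDA = load_cuda_kernels()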
| 301
| 0
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lat2: float, lon1: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in meters between two points on Earth,
    given their latitudes and longitudes in degrees.
    https://en.wikipedia.org/wiki/Haversine_formula
    """
    # Convert the geodetic latitudes to reduced (parametric) latitudes to
    # account for the Earth's flattening (constants per WGS84).
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
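    # Usage sketch (coordinates are illustrative; the return value is in meters):
    #   haversine_distance(37.774856, 37.864742, -122.424227, -119.537521)
    #   # San Francisco -> Yosemite, roughly 254 km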
| 109
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process (non-preemptive shortest job first)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed and which has remaining
    # execution time is put into ready_process, and the shortest process in
    # ready_process (target_process) is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order sampler (Algorithm 1 of Karras et al., 2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
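# Usage sketch (illustrative; `unet` stands in for any noise-prediction model
# with a diffusers-style call signature):
#   scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
#   scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#   sample = torch.randn(1, 4, 64, 64, device="cuda") * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample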
| 80
|
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self) -> None:
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_do_lower_case(self) -> None:
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self) -> None:
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self) -> None:
        pass

    def test_split_by_punct(self) -> None:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self) -> None:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self) -> None:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self) -> None:
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self) -> None:
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self) -> None:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self) -> None:
        sequence = "This is a test"
        ids_target = [13, 1, 4_398, 25, 21, 1_289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self) -> None:
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 301
| 0
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz, returning the concatenated terms from `number` up to and
    including `iterations`.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
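    # Example (verifiable by hand): fizz_buzz(1, 7) returns "1 2 Fizz 4 Buzz Fizz 7 ".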
| 309
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
A = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
A = model_name_to_url[model_name]
print("""Checkpoint URL: """ , _lowerCAmelCase )
A = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
# rename keys
for key in state_dict.copy().keys():
A = state_dict.pop(_lowerCAmelCase )
A = val
A = get_focalnet_config(_lowerCAmelCase )
A = FocalNetForImageClassification(_lowerCAmelCase )
model.eval()
# load state dict
model.load_state_dict(_lowerCAmelCase )
# verify conversion
A = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A = BitImageProcessor(
do_resize=_lowerCAmelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase , crop_size=224 , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , )
A = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
A = processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
A = image_transforms(_lowerCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowerCAmelCase , atol=1E-4 )
A = model(**_lowerCAmelCase )
A = outputs.logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
print("""First values of logits:""" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
A = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
A = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
A = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
A = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
A = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
A = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
_lowerCamelCase : str = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
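# Hypothetical invocation of this conversion script (the file name is assumed):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub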
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast REALM tokenizer (backed by HuggingFace's `tokenizers` library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[int]:
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**snake_case_ )
__lowerCAmelCase = do_lower_case
    def batch_encode_candidates(self, text, **kwargs) -> BatchEncoding:
        # Always pad candidates to max_length so every candidate has the same shape.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
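# A minimal usage sketch (checkpoint name taken from the map above; shapes are
# assumptions): encode two questions with two evidence candidates each, padded
# to a common length so the result stacks into (batch, num_candidates, seq_len).
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["candidate 1a", "candidate 1b"], ["candidate 2a", "candidate 2b"]],
#       max_length=10,
#       return_tensors="pt",
#   )
#   batch["input_ids"].shape  # -> torch.Size([2, 2, 10])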
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: complex, b: complex, c: complex) -> tuple:
    """Solve a*x**2 + b*x + c = 0; purely real roots are returned as floats."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'''The solutions are: {solution_1} and {solution_2}''')
if __name__ == "__main__":
main()
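# A quick sketch of the complex branch: x**2 + 2x + 5 = 0 has discriminant
# b**2 - 4ac = -16, so quadratic_roots(1, 2, 5) returns ((-1+2j), (-1-2j)).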
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    # Side length j of the number spiral; the corners of the ring with side
    # j + 2 are j**2 + (j + 1), j**2 + 2(j + 1), j**2 + 3(j + 1) and (j + 2)**2,
    # and only the first three can be prime.
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor in [0, 1] to a bit tensor in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Convert a bit tensor in {-1, 1} back to an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
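# Round-trip sketch for the two helpers above: encoding quantizes each channel
# to an 8-bit integer, so decoding recovers floor(x * 255) / 255.
#   x = torch.rand(1, 3, 8, 8)
#   x_rt = bits_to_decimal(decimal_to_bits(x))
#   assert torch.allclose(x_rt, (x * 255).int().float() / 255)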
def ddim_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True, ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowerCamelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowerCamelCase = self.alphas_cumprod[timestep]
__lowerCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowerCamelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowerCamelCase = self.bit_scale
if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowerCamelCase = self._get_variance(_lowerCAmelCase , _lowerCAmelCase )
__lowerCamelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowerCamelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowerCamelCase = model_output.device if torch.is_tensor(_lowerCAmelCase ) else """cpu"""
__lowerCamelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
__lowerCamelCase = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) ** 0.5 * eta * noise
__lowerCamelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def ddpm_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type: str = "epsilon", generator=None, return_dict: bool = True, ):
'''simple docstring'''
__lowerCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCamelCase, __lowerCamelCase = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 )
else:
__lowerCamelCase = None
# 1. compute alphas, betas
__lowerCamelCase = self.alphas_cumprod[t]
__lowerCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCamelCase = 1 - alpha_prod_t
__lowerCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCamelCase = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
__lowerCamelCase = self.bit_scale
if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCamelCase = 0
if t > 0:
__lowerCamelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCAmelCase ).to(model_output.device )
__lowerCamelCase = (self._get_variance(_lowerCAmelCase , predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
__lowerCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet, scheduler, bit_scale: float = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # NOTE: binding via __get__ is an assumption; the pipeline monkey-patches
        # the scheduler's step with the matching bit-aware variant defined above.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(self, height: int = 256, width: int = 256, num_inference_steps: int = 50, generator=None, batch_size: int = 1, output_type: str = "pil", return_dict: bool = True, **kwargs, ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator, )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        image = bits_to_decimal(latents)
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
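# Hypothetical usage sketch (model construction omitted; names assumed): the
# pipeline wires a UNet and a DDIM/DDPM scheduler through the bit helpers above.
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]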
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
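# Usage sketch (the environment variable names here are hypothetical):
#   NUM_WORKERS=4 DEBUG=1 python train.py
# would make
#   get_int_from_env(["NUM_WORKERS", "WORLD_SIZE"], 1)  # -> 4
#   parse_flag_from_env("DEBUG")                        # -> True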
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count Lychrel candidates below `limit` (no palindrome within 50 steps)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'{solution() = }')
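# Worked example: 47 is not a Lychrel candidate, since one reverse-and-add step
# gives 47 + 74 = 121, a palindrome; 196 never reaches a palindrome within the
# 50-iteration budget above and is therefore counted.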
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit via a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so divide out the factor (1 - 1/i)
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
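# Worked example: solution(10) sums phi(2)..phi(10) =
# 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31.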
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
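# Non-interactive sketch of the same entry point:
#   patience_sort([1, 9, 5, 21, 17, 6])  # -> [1, 5, 6, 9, 17, 21]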
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p**q * q**p <= base**degree for distinct primes p < q."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
import pyspark
def generate_fn():
UpperCAmelCase_ : Optional[int] = df.select("*" ,pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
UpperCAmelCase_ : Union[str, Any] = df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" )
UpperCAmelCase_ : Dict = partition_df.collect()
UpperCAmelCase_ : Tuple = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self : Optional[int] ) -> str:
yield from self.generate_examples_fn()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str ) -> "SparkExamplesIterable":
UpperCAmelCase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(snake_case_ )
return SparkExamplesIterable(self.df , partition_order=snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ) -> "SparkExamplesIterable":
UpperCAmelCase_ : str = self.split_shard_indices_by_worker(snake_case_ , snake_case_ )
return SparkExamplesIterable(self.df , partition_order=snake_case_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return len(self.partition_order )
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : List[Any] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> Optional[int]:
import pyspark
UpperCAmelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase_ : Union[str, Any] = df
UpperCAmelCase_ : Optional[int] = working_dir
super().__init__(
cache_dir=snake_case_ , config_name=str(self.df.semanticHash() ) , **snake_case_ , )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# Returns the path of the created file.
def create_cache_and_write_probe(lowerCAmelCase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=snake_case_ )
UpperCAmelCase_ : Tuple = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(snake_case_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase_ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(snake_case_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
return datasets.DatasetInfo(features=self.config.features )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> Tuple:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict ) -> List[str]:
import pyspark
def get_arrow_batch_size(lowerCAmelCase_ : Optional[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
UpperCAmelCase_ : Tuple = self.df.count()
UpperCAmelCase_ : Optional[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase_ : Optional[Any] = (
self.df.limit(snake_case_ )
.repartition(1 )
.mapInArrow(snake_case_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase_ : List[Any] = min(snake_case_ , int(approx_total_size / max_shard_size ) )
UpperCAmelCase_ : List[Any] = self.df.repartition(snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
UpperCAmelCase_ : List[Any] = ParquetWriter if file_format == "parquet" else ArrowWriter
UpperCAmelCase_ : Any = os.path.join(self._working_dir , os.path.basename(snake_case_ ) ) if self._working_dir else fpath
UpperCAmelCase_ : Optional[int] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase_ : str = self.config.features
UpperCAmelCase_ : Optional[Any] = self._writer_batch_size
UpperCAmelCase_ : List[Any] = self._fs.storage_options
def write_arrow(lowerCAmelCase_ : Union[str, Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase_ : Union[str, Any] = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase_ : Any = next(snake_case_ , snake_case_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = writer_class(
features=snake_case_ , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=snake_case_ , storage_options=snake_case_ , embed_local_files=snake_case_ , )
UpperCAmelCase_ : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(snake_case_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase_ , UpperCAmelCase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
UpperCAmelCase_ : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=snake_case_ , storage_options=snake_case_ , embed_local_files=snake_case_ , )
UpperCAmelCase_ : str = pa.Table.from_batches([batch] )
writer.write_table(snake_case_ )
if writer._num_bytes > 0:
UpperCAmelCase_ , UpperCAmelCase_ : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(snake_case_ ) ):
UpperCAmelCase_ : List[Any] = os.path.join(os.path.dirname(snake_case_ ) , os.path.basename(snake_case_ ) )
shutil.move(snake_case_ , snake_case_ )
UpperCAmelCase_ : Union[str, Any] = (
self.df.mapInArrow(snake_case_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str = "arrow" , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : Union[str, Any] = None , **lowerCAmelCase_ : List[Any] , ) -> List[Any]:
self._validate_cache_dir()
UpperCAmelCase_ : Any = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case_ )
UpperCAmelCase_ : Any = not is_remote_filesystem(self._fs )
UpperCAmelCase_ : str = os.path.join if is_local else posixpath.join
UpperCAmelCase_ : Optional[Any] = "-TTTTT-SSSSS-of-NNNNN"
UpperCAmelCase_ : Union[str, Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCAmelCase_ : List[Any] = path_join(self._output_dir , snake_case_ )
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[Any] = []
for task_id, content in self._prepare_split_single(snake_case_ , snake_case_ , snake_case_ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(snake_case_ )
UpperCAmelCase_ : Dict = total_num_examples
UpperCAmelCase_ : List[str] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCAmelCase_ : Optional[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase_ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , ):
rename(
snake_case_ , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , )
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : int = 0
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = task_id_and_num_shards[i]
for shard_id in range(snake_case_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(snake_case_ , len(snake_case_ ) ).map(lambda lowerCAmelCase_ : _rename_shard(*snake_case_ ) ).collect()
else:
# don't use any pattern
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(snake_case_ , "" ) , )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[Any] , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
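# Hypothetical end-to-end sketch (a SparkSession `spark` is assumed); users
# normally take this path instead of instantiating the Spark builder directly:
#   df = spark.createDataFrame([("a", 1), ("b", 2)], ["text", "label"])
#   ds = datasets.Dataset.from_spark(df)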
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def lowercase (_lowerCAmelCase="train-batch.pt" ):
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCAmelCase , repo_type="""dataset""" )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
'''simple docstring'''
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
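# Example: 197 is a circular prime because all of its rotations 197, 971 and
# 719 are prime, so it appears in find_circular_primes(); 19 is not, since its
# rotation 91 = 7 * 13 is composite.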
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers only."""
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        return sqrt(pi)  # Gamma(1/2) = sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every utf-8 byte to a printable unicode character."""
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
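# Sketch of the mapping above: printable bytes map to themselves, while bytes
# outside the printable ranges are shifted into unused unicode points, e.g.
#   bytes_to_unicode()[ord(" ")]  # -> 'Ġ' (chr(256 + 32))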
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
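# Example: get_pairs(("h", "e", "l", "l", "o")) returns
#   {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}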
class LongformerTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs, )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (i.e. most frequent during training) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into byte-level BPE sub-word tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence:   <s> X </s>
        # Pair of sequences: <s> A </s></s> B </s>
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # Longformer (like RoBERTa) does not use token type ids, so a list of zeros is returned.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
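# A hypothetical usage sketch (not part of the original file; assumes real `vocab.json` and
# `merges.txt` files on disk, and that the module's imports resolve as above):
#
#     tokenizer = LongformerTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokenizer._tokenize("Hello world")  # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']
#
# The 'Ġ' prefix is how the byte-to-unicode mapping above encodes a leading space.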
| 129
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is set.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
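# A minimal sketch (not part of the original file) of exercising the check above locally by
# faking the SageMaker environment variables; it still returns False unless the
# `smdistributed` package is importable:
#
#     os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
#     os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
#     is_sagemaker_model_parallel_available()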
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 301
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 20
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 301
| 0
|
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the identity lcm(x, y) = x * y / gcd(x, y)."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
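# Worked check (not in the original file): for n = 10 the chain of lcm() calls runs
# 1 -> 2 -> 6 -> 12 -> 60 -> 60 -> 420 -> 840 -> 2520 -> 2520, so solution(10) == 2520,
# the smallest number evenly divisible by every integer from 1 to 10.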
if __name__ == "__main__":
print(F"""{solution() = }""")
| 80
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 301
| 0
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
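# Quick sanity check (not in the original script): a uniform distribution over k outcomes has
# entropy log(k), so entropy(torch.full((4,), 0.25)) should be close to log(4) ~= 1.3863.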
def print_2d_tensor(tensor):
    """Log a 2D tensor with one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and gradient-based head importance scores."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
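# Background note (explanatory, not in the original script): because `head_mask` multiplies each
# head's output, |d loss / d mask| estimates how much the loss would change if that head were
# removed -- the gradient-based importance score of Michel et al. (arXiv:1905.10650). Heads with
# small scores are the candidates that `mask_heads` below zeroes out first.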
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and measure the effect on parameters, score and speed."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 309
|
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Project Euler 14: return the starting number below `limit` whose Collatz chain is
    longest, memoizing chain lengths (counting every term, endpoints included)."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for start in range(2, limit):
        counter = 0
        number = start

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number
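# Worked example (not in the original file): below 10 the longest chain starts at 9, whose
# chain 9 -> 28 -> 14 -> 7 -> ... -> 1 contains 20 terms, so solution(10) returns 9.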
if __name__ == "__main__":
print(solution(int(input().strip())))
| 301
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A,  # the long expected-encoding literal defined above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 258
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2):
    """Midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Recursively draw a Sierpinski triangle with the given corner vertices."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
SCREAMING_SNAKE_CASE_ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 301
| 0
|
import random
def a_ ( _A , _A ) -> Tuple:
"""simple docstring"""
snake_case__ , snake_case__ , snake_case__ = [], [], []
for element in data:
if element < pivot:
less.append(_lowerCAmelCase )
elif element > pivot:
greater.append(_lowerCAmelCase )
else:
equal.append(_lowerCAmelCase )
return less, equal, greater
def quick_select(items, index):
    """Return the element that would sit at ``index`` if ``items`` were sorted."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
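

# Usage sketch (assuming the helpers above): quick_select returns the k-th
# smallest element without fully sorting the list.
if __name__ == "__main__":
    data = [2, 7, 4, 1, 9]
    # index len(data) // 2 of the sorted order [1, 2, 4, 7, 9] is the median
    print(quick_select(data, len(data) // 2))  # -> 4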
| 307
|
"""simple docstring"""
def partition(m):
    """Count the partitions of ``m`` with a bottom-up dynamic program."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
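
# Sanity check: memo[n][k] appears to count the partitions of n whose largest
# part is at most k + 1, so memo[m][m - 1] is the partition number p(m).
# For example partition(5) == 7, matching the seven partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.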
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 301
| 0
|
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Sum of the first ``num_of_terms`` terms of an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
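
# Worked example of the closed form S_n = n/2 * (2a + (n - 1) * d):
# sum_of_series(1, 1, 10) = 10/2 * (2*1 + 9*1) = 5 * 11 = 55.0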
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""")
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""").to("""cuda:0""")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("""yjernite/bart_eli5""").to("""cuda:0""")
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""")
        sas_model.load_state_dict(save_dict["""model"""])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="""t5-small""", from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""", device="""cuda:0"""
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="""wiki_snippets""", name="""wiki40b_en_100_0""")["""train"""]
        wiki40b_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""",
            dtype="""float32""",
            mode="""r""",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("""eli5""", name="""LFQA_reddit""")
    eli5_train = eli5["""train_eli5"""]
    eli5_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""", dtype="""float32""", mode="""r""", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples
def make_support(question, source="""wiki40b""", method="""dense""", n_results=10):
    if source == "none":
        support_doc, hit_lst = (""" <P> """.join(["""""" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            # NOTE: argument order for query_qa_dense_index is an assumption based
            # on the surrounding code; check eli5_utils before relying on it.
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="""english_wiki40b_snippets_100w""", n_results=n_results,
            )
    support_list = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    question_doc = """question: {} context: {}""".format(question, support_doc)
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        # NOTE: the first three positional arguments to qa_sas_generate are an
        # assumption based on the parameter names; check eli5_utils if in doubt.
        answer = qa_sas_generate(
            sas_tokenizer,
            sas_model,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="""cuda:0""",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 0
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the last bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
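
# Quick self-check: 25 == 0b11001 has three set bits, so both implementations
# must agree on it.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3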
def benchmark() -> None:
    """Benchmark both implementations with timeit."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup,
        )
        print(f"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 31
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="""summarization""",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("""w""", encoding="""utf-8""")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, """prefix""", """""") or """"""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="""pt""", truncation=True, padding="""longest""").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + """\n""")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("""model_name""", type=str, help="""like facebook/bart-large-cnn,t5-base, etc.""")
    parser.add_argument("""input_path""", type=str, help="""like cnn_dm/test.source""")
    parser.add_argument("""save_path""", type=str, help="""where to save summaries""")
    parser.add_argument("""--reference_path""", type=str, required=False, help="""like cnn_dm/test.target""")
    parser.add_argument("""--score_path""", type=str, required=False, default="""metrics.json""", help="""where to save metrics""")
    parser.add_argument("""--device""", type=str, required=False, default=DEFAULT_DEVICE, help="""cuda, cuda:1, cpu etc.""")
    parser.add_argument(
        """--prefix""", type=str, required=False, default=None, help="""will be added to the beginning of src examples""")
    parser.add_argument("""--task""", type=str, default="""summarization""", help="""used for task_specific_params + metrics""")
    parser.add_argument("""--bs""", type=int, default=8, required=False, help="""batch size""")
    parser.add_argument(
        """--n_obs""", type=int, default=-1, required=False, help="""How many observations. Defaults to all.""")
    parser.add_argument("""--fp16""", action="""store_true""")
    parser.add_argument("""--dump-args""", action="""store_true""", help="""print the custom hparams with the results""")
    parser.add_argument(
        """--info""",
        nargs="""?""",
        type=str,
        const=datetime_now(),
        help=(
            """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
            """ lang=en-ru. If no value is passed, the current datetime string will be used."""
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""")
    examples = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("""Can't mix --fp16 and --device cpu""")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if """translation""" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["""info"""] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, """w"""))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301
| 0
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
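# Example invocation (hypothetical script name and output path; the flags are the
# ones registered above):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha --dump_path ./karlo-image-variations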
| 67
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="""shi-labs/oneformer_demo"""):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="""dataset"""), """r""") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["""name"""]
        class_names.append(info["""name"""])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["""thing_ids"""] = thing_ids
    metadata["""class_names"""] = class_names
    return metadata
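
# The class-info JSON is assumed to map label ids to entries shaped like
# {"name": "<class name>", "isthing": <bool>}; that schema is inferred from the
# loop above rather than from the dataset card.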
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="""shi-labs/oneformer_demo""",
        class_info_file="""ade20k_panoptic.json""",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, """image_mean"""))
        self.assertTrue(hasattr(image_processor, """image_std"""))
        self.assertTrue(hasattr(image_processor, """do_normalize"""))
        self.assertTrue(hasattr(image_processor, """do_resize"""))
        self.assertTrue(hasattr(image_processor, """size"""))
        self.assertTrue(hasattr(image_processor, """ignore_index"""))
        self.assertTrue(hasattr(image_processor, """class_info_file"""))
        self.assertTrue(hasattr(image_processor, """num_text"""))
        self.assertTrue(hasattr(image_processor, """repo_path"""))
        self.assertTrue(hasattr(image_processor, """metadata"""))
        self.assertTrue(hasattr(image_processor, """do_reduce_labels"""))
def A__ ( self ) -> List[str]:
pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["""semantic"""] * len(image_inputs), return_tensors="""pt"""
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["""semantic"""] * len(image_inputs), return_tensors="""pt"""
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["""semantic"""] * len(image_inputs), return_tensors="""pt"""
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="""np"""):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["""semantic"""] * len(image_inputs),
            annotations,
            return_tensors="""pt""",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
def A__ ( self ) -> List[str]:
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["""mask_labels"""]
            class_labels = inputs["""class_labels"""]
            pixel_values = inputs["""pixel_values"""]
            text_inputs = inputs["""text_inputs"""]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="""pil""")
        common(is_instance_map=True, segmentation_type="""pil""")
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="""ade20k_panoptic.json""",
            num_text=self.image_processing_tester.num_text,
            repo_path="""shi-labs/oneformer_demo""",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="""ade20k_panoptic.json""",
            num_text=self.image_processing_tester.num_text,
            repo_path="""shi-labs/oneformer_demo""",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("""segmentation""" in el)
            self.assertTrue("""segments_info""" in el)
            self.assertEqual(type(el["""segments_info"""]), list)
            self.assertEqual(
                el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="""ade20k_panoptic.json""",
            num_text=self.image_processing_tester.num_text,
            repo_path="""shi-labs/oneformer_demo""",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("""segmentation""" in el)
            self.assertTrue("""segments_info""" in el)
            self.assertEqual(type(el["""segments_info"""]), list)
            self.assertEqual(
                el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 301
| 0
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(snake_case_ , snake_case_ )}
elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 268
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
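

# Minimal usage sketch (illustrative checkpoint; requires flax to be installed):
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")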
| 301
| 0
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def __UpperCAmelCase ( ):
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def __UpperCAmelCase ( a_: Any=True ):
_UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("model_name", type=_lowerCAmelCase, help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path", type=_lowerCAmelCase, help="like cnn_dm/test.source" )
parser.add_argument("save_path", type=_lowerCAmelCase, help="where to save summaries" )
parser.add_argument("--reference_path", type=_lowerCAmelCase, required=_lowerCAmelCase, help="like cnn_dm/test.target" )
parser.add_argument("--score_path", type=_lowerCAmelCase, required=_lowerCAmelCase, default="metrics.json", help="where to save metrics" )
parser.add_argument("--device", type=_lowerCAmelCase, required=_lowerCAmelCase, default=_lowerCAmelCase, help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix", type=_lowerCAmelCase, required=_lowerCAmelCase, default=_lowerCAmelCase, help="will be added to the begininng of src examples" )
parser.add_argument("--task", type=_lowerCAmelCase, default="summarization", help="used for task_specific_params + metrics" )
parser.add_argument("--bs", type=_lowerCAmelCase, default=8, required=_lowerCAmelCase, help="batch size" )
parser.add_argument(
"--n_obs", type=_lowerCAmelCase, default=-1, required=_lowerCAmelCase, help="How many observations. Defaults to all." )
parser.add_argument("--fp16", action="store_true" )
parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results" )
parser.add_argument(
"--info", nargs="?", type=_lowerCAmelCase, const=datetime_now(), help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
), )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
_UpperCAmelCase , _UpperCAmelCase : Any = parser.parse_known_args()
_UpperCAmelCase : List[str] = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
_UpperCAmelCase : int = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
_UpperCAmelCase : Any = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
_UpperCAmelCase : List[Any] = generate_summaries_or_translations(
_lowerCAmelCase, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fpaa=args.fpaa, task=args.task, prefix=args.prefix, **_lowerCAmelCase, )
if args.reference_path is None:
return {}
# Compute scores
_UpperCAmelCase : int = calculate_bleu if "translation" in args.task else calculate_rouge
_UpperCAmelCase : Any = [x.rstrip() for x in open(args.save_path ).readlines()]
_UpperCAmelCase : Optional[Any] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
_UpperCAmelCase : Dict = score_fn(_lowerCAmelCase, _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
_UpperCAmelCase : Any = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase, open(args.score_path, "w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
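    # Usage for summarization (an illustrative sketch; the dataset paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16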
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-element combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # Backtracking: extend current_list one element at a time, recurse, then undo.
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
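# Cross-check sketch (added for illustration; not part of the original module):
# the backtracking output matches Python's built-in itertools.combinations.
#
#     from itertools import combinations
#
#     assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]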
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
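
# Alternative sketch (an addition, not from the original file): every third
# Fibonacci number is even, and the even terms satisfy E(k) = 4 * E(k-1) + E(k-2),
# so the even values can be generated directly instead of filtering.
def solution_even_recurrence(n: int = 4000000) -> int:
    prev, curr = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while prev <= n:
        total += prev
        prev, curr = curr, 4 * curr + prev
    return total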
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
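# Usage sketch (illustrative): the first call JIT-compiles the extension, which
# needs a CUDA toolkit and a host C++ compiler; later calls hit torch's build
# cache. The exact forward signature lives in the kernel sources, so the call
# below is indicative only:
#
#     MSDA = load_cuda_kernels()
#     # MSDA.ms_deform_attn_forward(value, spatial_shapes, level_start_index,
#     #                             sampling_locations, attention_weights, im2col_step)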
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of single-coin moves needed so that every node
    of the tree ends up holding exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib_moves, distrib_excess)

    return get_distrib(root)[0]
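
# Worked example (the tree below is an illustration, not from the original file):
#
#         0
#        / \
#       0   3
#
# Three single-coin moves are needed: two coins travel from the right leaf up to
# the root, and one of them continues down to the left leaf, so
# distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3))) == 3.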
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Non-preemptive shortest-job-first: among the arrived, unfinished
    processes, always run the one with the smallest remaining burst time to
    completion, and record each process's waiting time."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed, a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process. The
    # shortest process in ready_process, target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
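# Possible extension (an assumption, not part of the original benchmark): strip
# case and punctuation first, so phrases like "A man, a plan, a canal: Panama"
# can be fed to the same predicates.
#
#     def normalize(phrase: str) -> str:
#         return "".join(ch.lower() for ch in phrase if ch.isalnum())
#
#     assert is_palindrome_slice(normalize("A man, a plan, a canal: Panama"))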
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4_398, 25, 21, 1_289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,  # the expected-encoding dict literal kept above
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_equivalence(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers without sigmas are not supported here
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
SCREAMING_SNAKE_CASE_ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" REALM tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts (optionally with text pairs), always
        padding to max_length so every question's candidates share one shape."""
        # Always use a fixed padding strategy.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
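
# Usage sketch for batch_encode_candidates (illustrative; the checkpoint name is a
# real REALM checkpoint, and the shapes assume 2 questions x 2 candidates padded to 10):
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#     # batch["input_ids"].shape == (2, 2, 10)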
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both the source vertex and the destination vertex are present in the
            # adjacency list, add the destination vertex to the source vertex's list of
            # adjacent vertices and add the source vertex to the destination vertex's
            # list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only the source vertex is present in the adjacency list, add the
            # destination vertex to the source vertex's list of adjacent vertices, then
            # create a new entry with the destination vertex as key and a list
            # containing the source vertex as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only the destination vertex is present in the adjacency list, add the
            # source vertex to the destination vertex's list of adjacent vertices, then
            # create a new entry with the source vertex as key and a list containing
            # the destination vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present in the adjacency list, create entries for
            # both, each with the other as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices are present in the adjacency list, add the destination
            # vertex to the source vertex's list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only the source vertex is present in the adjacency list, add the
            # destination vertex to its list and create a new entry for the
            # destination vertex with no adjacent vertices.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only the destination vertex is present in the adjacency list, create
            # a new entry for the source vertex with a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present in the adjacency list, create an entry for
            # the source vertex with the destination vertex as its first adjacent
            # vertex, and an entry for the destination vertex with no adjacent
            # vertices.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
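

# A short usage sketch (added for illustration): build a small directed graph
# with the fluent add_edge API and print its adjacency list.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=True)
    graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)
    print(graph)  # {0: [1, 2], 1: [2], 2: []}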
"""simple docstring"""
import math
def lowercase (_lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
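
# Quick sanity check (illustrative): 29 survives the 6k +/- 1 trial division,
# while 33 = 3 * 11 is rejected by the modulo-3 test.
assert is_prime(29) is True
assert is_prime(33) is False
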
def solution(ratio: float = 0.1) -> int:
    """
    Return the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below the given ratio (Project Euler 58).
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily pick the largest denominations first to make the given change."""
    total_value = int(value)

    # Initialize result
    answer = []

    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the coin to the answer

    return answer
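

# Worked example (illustrative): greedily making 987 from the Indian denominations
# yields 500 + 100*4 + 50 + 20 + 10 + 5 + 2. Note the greedy strategy is only
# guaranteed minimal for canonical coin systems such as this one.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]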
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if the user does not enter any
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
"""simple docstring"""
import os
from distutils.util import strtobool
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for e in env_keys:
__lowerCAmelCase = int(os.environ.get(_lowerCAmelCase , -1 ) )
if val >= 0:
return val
return default
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return strtobool(_lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowercase (_lowerCAmelCase , _lowerCAmelCase="no" ):
__lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) )
return value
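

# Usage sketch (illustrative; the environment variable names below are made up
# for the example, they are not ones the library defines):
if __name__ == "__main__":
    os.environ["EXAMPLE_NUM_PROCESSES"] = "4"
    print(get_int_from_env(["EXAMPLE_NUM_PROCESSES"], default=1))  # 4
    print(parse_flag_from_env("EXAMPLE_DEBUG", default=False))  # False (unset)
    print(parse_choice_from_env("EXAMPLE_MODE", default="no"))  # "no" (unset)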
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with a distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
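

# Usage sketch (illustrative): compose a RAG configuration from a DPR question
# encoder and a BART generator, assuming both configs are importable from the
# library root as usual.
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
#   )
#   assert rag_config.generator.model_type == "bart"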
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowerCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
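

# Small worked example (illustrative): for n = 2..10 the sieve yields
# phi(n) = 1, 2, 2, 4, 2, 6, 4, 6, 4, which sums to 31 -- the number of
# reduced proper fractions with denominator <= 10.
assert solution(10) == 31
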
if __name__ == "__main__":
print(solution())
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas reported in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
"""simple docstring"""
from math import isqrt, loga
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def lowercase (_lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ):
__lowerCAmelCase = degree * loga(_lowerCAmelCase )
__lowerCAmelCase = int(_lowerCAmelCase )
__lowerCAmelCase = calculate_prime_numbers(_lowerCAmelCase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
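

# Worked example (illustrative): the smallest hybrid-integer is
# C(2, 3) = 2**3 * 3**2 = 72, and since 72 <= 9**2 = 81 it is the only one
# counted for these tiny bounds.
assert solution(base=9, degree=2) == 1
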
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
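
# With the lazy module in place, consumers import from the package root as
# usual, and the heavy torch-dependent modules only load on first access
# (illustrative, assuming a torch install):
#
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#
#   model = UperNetForSemanticSegmentation(UperNetConfig())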
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
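

# To run these tests locally (illustrative; the path assumes the usual
# transformers repository layout, and RUN_SLOW=1 enables the @slow-marked
# integration tests):
#
#   RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py -v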
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
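
# Quick check (illustrative): the default geometry gives 71 attention heads of
# dimension 64 (4544 // 71), and `rotary` is True whenever `alibi` is off:
#
#   config = FalconConfig()
#   config.head_dim  # 64
#   config.rotary    # True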
"""simple docstring"""
from math import pi, sqrt
def lowercase (_lowerCAmelCase ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_lowerCAmelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_lowerCAmelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
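

# Worked example (illustrative): gamma(3.5) unrolls the recurrence to
# 2.5 * 1.5 * 0.5 * sqrt(pi), approximately 3.3234.
assert abs(gamma(3.5) - 2.5 * 1.5 * 0.5 * sqrt(pi)) < 1e-12
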
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")